From 130500218a794f15df522c3ba5a31acbc77209e4 Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Tue, 23 Jul 2024 11:08:10 +0530 Subject: [PATCH 01/68] Caching avg total bytes and avg free bytes inside ClusterInfo (#14851) Signed-off-by: RS146BIJAY --- .../org/opensearch/cluster/ClusterInfo.java | 37 +++++++++++++++ .../decider/DiskThresholdDecider.java | 45 +++++++++---------- .../decider/DiskThresholdDeciderTests.java | 13 ------ 3 files changed, 57 insertions(+), 38 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 4c38d6fd99f5d..7216c447acc3e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -33,6 +33,7 @@ package org.opensearch.cluster; import org.opensearch.Version; +import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; @@ -68,6 +69,8 @@ public class ClusterInfo implements ToXContentFragment, Writeable { final Map routingToDataPath; final Map reservedSpace; final Map nodeFileCacheStats; + private long avgTotalBytes; + private long avgFreeByte; protected ClusterInfo() { this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), Map.of()); @@ -97,6 +100,7 @@ public ClusterInfo( this.routingToDataPath = routingToDataPath; this.reservedSpace = reservedSpace; this.nodeFileCacheStats = nodeFileCacheStats; + calculateAvgFreeAndTotalBytes(mostAvailableSpaceUsage); } public ClusterInfo(StreamInput in) throws IOException { @@ -117,6 +121,39 @@ public ClusterInfo(StreamInput in) throws IOException { } else { this.nodeFileCacheStats = Map.of(); } + + calculateAvgFreeAndTotalBytes(mostAvailableSpaceUsage); + } + + /** + * Returns a {@link DiskUsage} for the {@link RoutingNode} using the + * average usage of other nodes in the disk usage map. + * @param usages Map of nodeId to DiskUsage for all known nodes + */ + private void calculateAvgFreeAndTotalBytes(final Map usages) { + if (usages == null || usages.isEmpty()) { + this.avgTotalBytes = 0; + this.avgFreeByte = 0; + return; + } + + long totalBytes = 0; + long freeBytes = 0; + for (DiskUsage du : usages.values()) { + totalBytes += du.getTotalBytes(); + freeBytes += du.getFreeBytes(); + } + + this.avgTotalBytes = totalBytes / usages.size(); + this.avgFreeByte = freeBytes / usages.size(); + } + + public long getAvgFreeByte() { + return avgFreeByte; + } + + public long getAvgTotalBytes() { + return avgTotalBytes; } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index efa5115939d3c..5fc3f282f33f7 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -140,9 +140,8 @@ public static long sizeOfRelocatingShards( // Where reserved space is unavailable (e.g. 
stats are out-of-sync) compute a conservative estimate for initialising shards final List initializingShards = node.shardsWithState(ShardRoutingState.INITIALIZING); - initializingShards.removeIf(shardRouting -> reservedSpace.containsShardId(shardRouting.shardId())); for (ShardRouting routing : initializingShards) { - if (routing.relocatingNodeId() == null) { + if (routing.relocatingNodeId() == null || reservedSpace.containsShardId(routing.shardId())) { // in practice the only initializing-but-not-relocating shards with a nonzero expected shard size will be ones created // by a resize (shrink/split/clone) operation which we expect to happen using hard links, so they shouldn't be taking // any additional space and can be ignored here @@ -230,7 +229,14 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // subtractLeavingShards is passed as false here, because they still use disk space, and therefore we should be extra careful // and take the size into account - final DiskUsageWithRelocations usage = getDiskUsage(node, allocation, usages, false); + final DiskUsageWithRelocations usage = getDiskUsage( + node, + allocation, + usages, + clusterInfo.getAvgFreeByte(), + clusterInfo.getAvgTotalBytes(), + false + ); // First, check that the node currently over the low watermark double freeDiskPercentage = usage.getFreeDiskAsPercentage(); // Cache the used disk percentage for displaying disk percentages consistent with documentation @@ -492,7 +498,14 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl // subtractLeavingShards is passed as true here, since this is only for shards remaining, we will *eventually* have enough disk // since shards are moving away. No new shards will be incoming since in canAllocate we pass false for this check. - final DiskUsageWithRelocations usage = getDiskUsage(node, allocation, usages, true); + final DiskUsageWithRelocations usage = getDiskUsage( + node, + allocation, + usages, + clusterInfo.getAvgFreeByte(), + clusterInfo.getAvgTotalBytes(), + true + ); final String dataPath = clusterInfo.getDataPath(shardRouting); // If this node is already above the high threshold, the shard cannot remain (get it off!) final double freeDiskPercentage = usage.getFreeDiskAsPercentage(); @@ -581,13 +594,15 @@ private DiskUsageWithRelocations getDiskUsage( RoutingNode node, RoutingAllocation allocation, final Map usages, + final long avgFreeBytes, + final long avgTotalBytes, boolean subtractLeavingShards ) { DiskUsage usage = usages.get(node.nodeId()); if (usage == null) { // If there is no usage, and we have other nodes in the cluster, // use the average usage for all nodes as the usage for this node - usage = averageUsage(node, usages); + usage = new DiskUsage(node.nodeId(), node.node().getName(), "_na_", avgTotalBytes, avgFreeBytes); if (logger.isDebugEnabled()) { logger.debug( "unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]", @@ -619,26 +634,6 @@ private DiskUsageWithRelocations getDiskUsage( return diskUsageWithRelocations; } - /** - * Returns a {@link DiskUsage} for the {@link RoutingNode} using the - * average usage of other nodes in the disk usage map. 
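Patch 01 moves the per-decision averaging into ClusterInfo#calculateAvgFreeAndTotalBytes, which computes and caches the cluster-wide averages once per stats refresh instead of recomputing them on every allocation decision via the averageUsage helper this patch removes. A minimal, self-contained sketch of that averaging logic follows; the class and field names are illustrative stand-ins, not the actual OpenSearch types:

// Sketch: compute cluster-wide average total/free bytes once, at construction
// time. Integer division mirrors the patch; an empty usage map yields zeroes.
import java.util.Map;

class AvgDiskUsageSketch {
    final long avgTotalBytes;
    final long avgFreeBytes;

    AvgDiskUsageSketch(Map<String, long[]> usages) { // nodeId -> {totalBytes, freeBytes}
        if (usages == null || usages.isEmpty()) {
            this.avgTotalBytes = 0;
            this.avgFreeBytes = 0;
        } else {
            long total = 0;
            long free = 0;
            for (long[] u : usages.values()) {
                total += u[0];
                free += u[1];
            }
            this.avgTotalBytes = total / usages.size();
            this.avgFreeBytes = free / usages.size();
        }
    }

    public static void main(String[] args) {
        AvgDiskUsageSketch avg = new AvgDiskUsageSketch(
            Map.of("node2", new long[] { 100, 50 }, "node3", new long[] { 100, 0 })
        );
        System.out.println(avg.avgTotalBytes + " " + avg.avgFreeBytes); // 100 25
    }
}

The deleted testAverageUsage exercised exactly these inputs (two nodes at 100/50 and 100/0 giving averages of 100 total and 25 free), which is why the test goes away once the decider consumes the cached values.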
- * @param node Node to return an averaged DiskUsage object for - * @param usages Map of nodeId to DiskUsage for all known nodes - * @return DiskUsage representing given node using the average disk usage - */ - DiskUsage averageUsage(RoutingNode node, final Map usages) { - if (usages.size() == 0) { - return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", 0, 0); - } - long totalBytes = 0; - long freeBytes = 0; - for (DiskUsage du : usages.values()) { - totalBytes += du.getTotalBytes(); - freeBytes += du.getFreeBytes(); - } - return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", totalBytes / usages.size(), freeBytes / usages.size()); - } - /** * Given the DiskUsage for a node and the size of the shard, return the * percentage of free disk if the shard were to be allocated to the node. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 652633e689b93..2e24640fe858d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -863,19 +863,6 @@ public void testUnknownDiskUsage() { assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); } - public void testAverageUsage() { - RoutingNode rn = new RoutingNode("node1", newNode("node1")); - DiskThresholdDecider decider = makeDecider(Settings.EMPTY); - - final Map usages = new HashMap<>(); - usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 50)); // 50% used - usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used - - DiskUsage node1Usage = decider.averageUsage(rn, usages); - assertThat(node1Usage.getTotalBytes(), equalTo(100L)); - assertThat(node1Usage.getFreeBytes(), equalTo(25L)); - } - public void testFreeDiskPercentageAfterShardAssigned() { DiskThresholdDecider decider = makeDecider(Settings.EMPTY); From e485856e2794de2b019be34a50df389dac136b89 Mon Sep 17 00:00:00 2001 From: Liyun Xiu Date: Tue, 23 Jul 2024 20:14:26 +0800 Subject: [PATCH 02/68] Use default value when index.number_of_replicas is null (#14812) * Use default value when index.number_of_replicas is null Signed-off-by: Liyun Xiu * Add integration test Signed-off-by: Liyun Xiu * Add changelog Signed-off-by: Liyun Xiu --------- Signed-off-by: Liyun Xiu --- CHANGELOG.md | 1 + .../admin/indices/create/CreateIndexIT.java | 24 +++++++++++++++++ .../metadata/MetadataCreateIndexService.java | 3 ++- .../MetadataCreateIndexServiceTests.java | 27 +++++++++++++++++++ 4 files changed, 54 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29c78ea7e3e4f..5a54c5150da76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix create or update alias API doesn't throw exception for unsupported parameters ([#14719](https://github.com/opensearch-project/OpenSearch/pull/14719)) - Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200)) - Refactoring Grok.validatePatternBank by using an iterative approach ([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206)) +- Fix NPE when creating index with index.number_of_replicas set to null 
([#14812](https://github.com/opensearch-project/OpenSearch/pull/14812)) - Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722)) - Fix bulk upsert ignores the default_pipeline and final_pipeline when auto-created index matches the index template ([#12891](https://github.com/opensearch-project/OpenSearch/pull/12891)) - Fix NPE in ReplicaShardAllocator ([#14385](https://github.com/opensearch-project/OpenSearch/pull/14385)) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index 1c182b05fa4a8..fbe713d9e22c4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -406,4 +406,28 @@ public void testIndexNameInResponse() { assertEquals("Should have index name in response", "foo", response.index()); } + public void testCreateIndexWithNullReplicaCountPickUpClusterReplica() { + int numReplicas = 3; + String indexName = "test-idx-1"; + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("cluster.default_number_of_replicas", numReplicas).build()) + .get() + ); + Settings settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), (String) null) + .build(); + assertAcked(client().admin().indices().prepareCreate(indexName).setSettings(settings).get()); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, internalCluster().getClusterManagerName()); + for (IndexService indexService : indicesService) { + assertEquals(indexName, indexService.index().getName()); + assertEquals( + numReplicas, + (int) indexService.getIndexSettings().getSettings().getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, null) + ); + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 7973745ce84b3..50d25b11ef810 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -946,7 +946,8 @@ static Settings aggregateIndexSettings( if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(indexSettingsBuilder) == false) { indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, INDEX_NUMBER_OF_SHARDS_SETTING.get(settings)); } - if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettingsBuilder) == false) { + if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettingsBuilder) == false + || indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, DEFAULT_REPLICA_COUNT_SETTING.get(currentState.metadata().settings())); } if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 0d86cfcca389c..86ca8b3ad6319 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -2151,6 +2151,33 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() { ); } + public void testAggregateIndexSettingsIndexReplicaIsSetToNull() { + // This checks that aggregateIndexSettings works for the case when the index setting `index.number_of_replicas` is set to null + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + request.settings(Settings.builder().putNull(SETTING_NUMBER_OF_REPLICAS).build()); + Integer clusterDefaultReplicaNumber = 5; + Metadata metadata = new Metadata.Builder().persistentSettings( + Settings.builder().put("cluster.default_number_of_replicas", clusterDefaultReplicaNumber).build() + ).build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .build(); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings aggregatedSettings = aggregateIndexSettings( + clusterState, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertEquals(clusterDefaultReplicaNumber.toString(), aggregatedSettings.get(SETTING_NUMBER_OF_REPLICAS)); + } + public void testRequestDurabilityWhenRestrictSettingTrue() { // This checks that aggregateIndexSettings works for the case when the cluster setting // cluster.remote_store.index.restrict.async-durability is false or not set, it allows all types of durability modes From f85a58f64e5aaba76eb519e309881f288aff8fa6 Mon Sep 17 00:00:00 2001 From: shailendra0811 <167273922+shailendra0811@users.noreply.github.com> Date: Tue, 23 Jul 2024 18:10:32 +0530 Subject: [PATCH 03/68] [Remote Routing Table] Implement write and read flow for shard diff file. (#14684) * Implement write and read flow to upload/download shard diff file. 
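The crux of patch 02 is that Settings.builder().putNull(key) makes the key pass the exists() check while get() still returns null, so checking exists() alone let a null replica count through and caused the NPE. A small sketch of that behaviour, assuming a plain map-backed builder rather than the real Settings.Builder:

// Sketch: an explicitly-null entry "exists" but resolves to null, so the
// fallback to the cluster default must check both conditions.
import java.util.HashMap;
import java.util.Map;

class NullSettingSketch {
    private final Map<String, String> map = new HashMap<>();

    NullSettingSketch putNull(String key) {
        map.put(key, null); // the key now exists with a null value
        return this;
    }

    boolean exists(String key) {
        return map.containsKey(key); // true after putNull(key)
    }

    String get(String key) {
        return map.get(key); // still null after putNull(key)
    }

    public static void main(String[] args) {
        NullSettingSketch b = new NullSettingSketch().putNull("index.number_of_replicas");
        System.out.println(b.exists("index.number_of_replicas")); // true
        System.out.println(b.get("index.number_of_replicas"));    // null
        // Hence the fix: apply the cluster default when the key is missing OR
        // when it is present but resolves to null.
        boolean useClusterDefault = !b.exists("index.number_of_replicas") || b.get("index.number_of_replicas") == null;
        System.out.println(useClusterDefault); // true
    }
}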
Signed-off-by: Shailendra Singh --- CHANGELOG.md | 1 + .../remote/RemoteRoutingTableServiceIT.java | 97 +++++- .../routing/RoutingTableIncrementalDiff.java | 168 ++++++++++ .../InternalRemoteRoutingTableService.java | 73 +++- .../remote/NoopRemoteRoutingTableService.java | 33 +- .../remote/RemoteRoutingTableService.java | 48 ++- .../remote/ClusterMetadataManifest.java | 15 +- .../remote/ClusterStateDiffManifest.java | 60 +++- .../RemoteClusterStateCleanupManager.java | 26 ++ .../remote/RemoteClusterStateService.java | 94 +++++- .../remote/RemoteClusterStateUtils.java | 1 + .../remote/RemotePersistenceStats.java | 11 + .../model/RemoteClusterMetadataManifest.java | 7 +- .../routingtable/RemoteRoutingTableDiff.java | 150 +++++++++ .../RemoteRoutingTableServiceTests.java | 165 ++++++++- .../remote/ClusterMetadataManifestTests.java | 81 ++++- ...RemoteClusterStateCleanupManagerTests.java | 146 ++++++++ .../RemoteClusterStateServiceTests.java | 177 +++++++++- .../model/ClusterStateDiffManifestTests.java | 69 +++- .../RemoteIndexRoutingTableDiffTests.java | 317 ++++++++++++++++++ 20 files changed, 1663 insertions(+), 76 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java create mode 100644 server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java create mode 100644 server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableDiffTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a54c5150da76..c8f185ca2bb3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add matchesPluginSystemIndexPattern to SystemIndexRegistry ([#14750](https://github.com/opensearch-project/OpenSearch/pull/14750)) - Add Plugin interface for loading application based configuration templates (([#14659](https://github.com/opensearch-project/OpenSearch/issues/14659))) - Refactor remote-routing-table service inline with remote state interfaces([#14668](https://github.com/opensearch-project/OpenSearch/pull/14668)) +- Add shard-diff path to diff manifest to reduce number of read calls remote store (([#14684](https://github.com/opensearch-project/OpenSearch/pull/14684))) - Add SortResponseProcessor to Search Pipelines (([#14785](https://github.com/opensearch-project/OpenSearch/issues/14785))) - Add prefix mode verification setting for repository verification (([#14790](https://github.com/opensearch-project/OpenSearch/pull/14790))) - Add SplitResponseProcessor to Search Pipelines (([#14800](https://github.com/opensearch-project/OpenSearch/issues/14800))) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java index 53764c0b4d0e8..b0d046cbdf3db 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java @@ -8,6 +8,7 @@ package org.opensearch.gateway.remote; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; @@ -32,16 +33,19 @@ import java.util.Optional; import java.util.Set; import 
java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteRoutingTableServiceIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "test-index"; + private static final String INDEX_NAME_1 = "test-index-1"; BlobPath indexRoutingPath; AtomicInteger indexRoutingFiles = new AtomicInteger(); private final RemoteStoreEnums.PathType pathType = RemoteStoreEnums.PathType.HASHED_PREFIX; @@ -72,7 +76,13 @@ public void testRemoteRoutingTableIndexLifecycle() throws Exception { RemoteClusterStateService.class ); RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); - verifyUpdatesInManifestFile(remoteManifestManager); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); List routingTableVersions = getRoutingTableFromAllNodes(); assertTrue(areRoutingTablesSame(routingTableVersions)); @@ -86,7 +96,11 @@ public void testRemoteRoutingTableIndexLifecycle() throws Exception { assertTrue(indexRoutingFilesAfterUpdate >= indexRoutingFiles.get() + 3); }); - verifyUpdatesInManifestFile(remoteManifestManager); + latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); routingTableVersions = getRoutingTableFromAllNodes(); assertTrue(areRoutingTablesSame(routingTableVersions)); @@ -98,6 +112,42 @@ public void testRemoteRoutingTableIndexLifecycle() throws Exception { assertTrue(areRoutingTablesSame(routingTableVersions)); } + public void testRemoteRoutingTableEmptyRoutingTableDiff() throws Exception { + prepareClusterAndVerifyRepository(); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateService.class + ); + RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); + + List routingTableVersions = getRoutingTableFromAllNodes(); + assertTrue(areRoutingTablesSame(routingTableVersions)); + + // Update cluster settings + 
ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), 0, TimeUnit.SECONDS)) + .get(); + assertTrue(response.isAcknowledged()); + + latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, false); + + routingTableVersions = getRoutingTableFromAllNodes(); + assertTrue(areRoutingTablesSame(routingTableVersions)); + } + public void testRemoteRoutingTableIndexNodeRestart() throws Exception { BlobStoreRepository repository = prepareClusterAndVerifyRepository(); @@ -124,10 +174,16 @@ public void testRemoteRoutingTableIndexNodeRestart() throws Exception { RemoteClusterStateService.class ); RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); - verifyUpdatesInManifestFile(remoteManifestManager); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); } - public void testRemoteRoutingTableIndexMasterRestart1() throws Exception { + public void testRemoteRoutingTableIndexMasterRestart() throws Exception { BlobStoreRepository repository = prepareClusterAndVerifyRepository(); List routingTableVersions = getRoutingTableFromAllNodes(); @@ -153,7 +209,13 @@ public void testRemoteRoutingTableIndexMasterRestart1() throws Exception { RemoteClusterStateService.class ); RemoteManifestManager remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); - verifyUpdatesInManifestFile(remoteManifestManager); + Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().getMetadata().clusterUUID() + ); + List expectedIndexNames = new ArrayList<>(); + List deletedIndexNames = new ArrayList<>(); + verifyUpdatesInManifestFile(latestManifest, expectedIndexNames, 1, deletedIndexNames, true); } private BlobStoreRepository prepareClusterAndVerifyRepository() throws Exception { @@ -208,18 +270,23 @@ private BlobPath getIndexRoutingPath(BlobPath indexRoutingPath, String indexUUID ); } - private void verifyUpdatesInManifestFile(RemoteManifestManager remoteManifestManager) { - Optional latestManifest = remoteManifestManager.getLatestClusterMetadataManifest( - getClusterState().getClusterName().value(), - getClusterState().getMetadata().clusterUUID() - ); + private void verifyUpdatesInManifestFile( + Optional latestManifest, + List expectedIndexNames, + int expectedIndicesRoutingFilesInManifest, + List expectedDeletedIndex, + boolean isRoutingTableDiffFileExpected + ) { assertTrue(latestManifest.isPresent()); ClusterMetadataManifest manifest = latestManifest.get(); - assertTrue(manifest.getDiffManifest().getIndicesRoutingUpdated().contains(INDEX_NAME)); - assertTrue(manifest.getDiffManifest().getIndicesDeleted().isEmpty()); - assertFalse(manifest.getIndicesRouting().isEmpty()); - assertEquals(1, manifest.getIndicesRouting().size()); - 
assertTrue(manifest.getIndicesRouting().get(0).getUploadedFilename().contains(indexRoutingPath.buildAsString())); + + assertEquals(expectedIndexNames, manifest.getDiffManifest().getIndicesRoutingUpdated()); + assertEquals(expectedDeletedIndex, manifest.getDiffManifest().getIndicesDeleted()); + assertEquals(expectedIndicesRoutingFilesInManifest, manifest.getIndicesRouting().size()); + for (ClusterMetadataManifest.UploadedIndexMetadata uploadedFilename : manifest.getIndicesRouting()) { + assertTrue(uploadedFilename.getUploadedFilename().contains(indexRoutingPath.buildAsString())); + } + assertEquals(isRoutingTableDiffFileExpected, manifest.getDiffManifest().getIndicesRoutingDiffPath() != null); } private List getRoutingTableFromAllNodes() throws ExecutionException, InterruptedException { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java new file mode 100644 index 0000000000000..3d75b22a8ed7f --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTableIncrementalDiff.java @@ -0,0 +1,168 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.opensearch.cluster.Diff; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Represents a difference between {@link RoutingTable} objects that can be serialized and deserialized. + */ +public class RoutingTableIncrementalDiff implements Diff { + + private final Map> diffs; + + /** + * Constructs a new RoutingTableIncrementalDiff with the given differences. + * + * @param diffs a map containing the differences of {@link IndexRoutingTable}. + */ + public RoutingTableIncrementalDiff(Map> diffs) { + this.diffs = diffs; + } + + /** + * Gets the map of differences of {@link IndexRoutingTable}. + * + * @return a map containing the differences. + */ + public Map> getDiffs() { + return diffs; + } + + /** + * Reads a {@link RoutingTableIncrementalDiff} from the given {@link StreamInput}. + * + * @param in the input stream to read from. + * @return the deserialized RoutingTableIncrementalDiff. + * @throws IOException if an I/O exception occurs while reading from the stream. + */ + public static RoutingTableIncrementalDiff readFrom(StreamInput in) throws IOException { + int size = in.readVInt(); + Map> diffs = new HashMap<>(); + + for (int i = 0; i < size; i++) { + String key = in.readString(); + Diff diff = IndexRoutingTableIncrementalDiff.readFrom(in); + diffs.put(key, diff); + } + return new RoutingTableIncrementalDiff(diffs); + } + + /** + * Applies the differences to the provided {@link RoutingTable}. + * + * @param part the original RoutingTable to which the differences will be applied. + * @return the updated RoutingTable with the applied differences. 
+ */ + @Override + public RoutingTable apply(RoutingTable part) { + RoutingTable.Builder builder = new RoutingTable.Builder(); + for (IndexRoutingTable indexRoutingTable : part) { + builder.add(indexRoutingTable); // Add existing index routing tables to builder + } + + // Apply the diffs + for (Map.Entry> entry : diffs.entrySet()) { + builder.add(entry.getValue().apply(part.index(entry.getKey()))); + } + + return builder.build(); + } + + /** + * Writes the differences to the given {@link StreamOutput}. + * + * @param out the output stream to write to. + * @throws IOException if an I/O exception occurs while writing to the stream. + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(diffs.size()); + for (Map.Entry> entry : diffs.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + + /** + * Represents a difference between {@link IndexShardRoutingTable} objects that can be serialized and deserialized. + */ + public static class IndexRoutingTableIncrementalDiff implements Diff { + + private final List indexShardRoutingTables; + + /** + * Constructs a new IndexShardRoutingTableDiff with the given shard routing tables. + * + * @param indexShardRoutingTables a list of IndexShardRoutingTable representing the differences. + */ + public IndexRoutingTableIncrementalDiff(List indexShardRoutingTables) { + this.indexShardRoutingTables = indexShardRoutingTables; + } + + /** + * Applies the differences to the provided {@link IndexRoutingTable}. + * + * @param part the original IndexRoutingTable to which the differences will be applied. + * @return the updated IndexRoutingTable with the applied differences. + */ + @Override + public IndexRoutingTable apply(IndexRoutingTable part) { + IndexRoutingTable.Builder builder = new IndexRoutingTable.Builder(part.getIndex()); + for (IndexShardRoutingTable shardRoutingTable : part) { + builder.addIndexShard(shardRoutingTable); // Add existing shards to builder + } + + // Apply the diff: update or add the new shard routing tables + for (IndexShardRoutingTable diffShard : indexShardRoutingTables) { + builder.addIndexShard(diffShard); + } + return builder.build(); + } + + /** + * Writes the differences to the given {@link StreamOutput}. + * + * @param out the output stream to write to. + * @throws IOException if an I/O exception occurs while writing to the stream. + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(indexShardRoutingTables.size()); + for (IndexShardRoutingTable shardRoutingTable : indexShardRoutingTables) { + IndexShardRoutingTable.Builder.writeTo(shardRoutingTable, out); + } + } + + /** + * Reads a {@link IndexRoutingTableIncrementalDiff} from the given {@link StreamInput}. + * + * @param in the input stream to read from. + * @return the deserialized IndexShardRoutingTableDiff. + * @throws IOException if an I/O exception occurs while reading from the stream. 
+ */ + public static IndexRoutingTableIncrementalDiff readFrom(StreamInput in) throws IOException { + int size = in.readVInt(); + List indexShardRoutingTables = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + IndexShardRoutingTable shardRoutingTable = IndexShardRoutingTable.Builder.readFrom(in); + indexShardRoutingTables.add(shardRoutingTable); + } + return new IndexRoutingTableIncrementalDiff(indexShardRoutingTables); + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java index d7ebc54598b37..3c578a8c5c01f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -12,9 +12,11 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.remote.RemoteWritableEntityStore; @@ -25,8 +27,10 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteStateTransferException; +import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; import org.opensearch.gateway.remote.model.RemoteRoutingTableBlobStore; import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; +import org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; @@ -58,6 +62,7 @@ public class InternalRemoteRoutingTableService extends AbstractLifecycleComponen private final Supplier repositoriesService; private Compressor compressor; private RemoteWritableEntityStore remoteIndexRoutingTableStore; + private RemoteWritableEntityStore remoteRoutingTableDiffStore; private final ClusterSettings clusterSettings; private BlobStoreRepository blobStoreRepository; private final ThreadPool threadPool; @@ -84,9 +89,10 @@ public List getIndicesRouting(RoutingTable routingTable) { /** * Returns diff between the two routing tables, which includes upserts and deletes. 
+ * * @param before previous routing table - * @param after current routing table - * @return diff of the previous and current routing table + * @param after current routing table + * @return incremental diff of the previous and current routing table */ public DiffableUtils.MapDiff> getIndicesRoutingMapDiff( RoutingTable before, @@ -96,7 +102,7 @@ public DiffableUtils.MapDiff> indexRoutingTableDiff, + LatchedActionListener latchedActionListener + ) { + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(indexRoutingTableDiff); + RemoteRoutingTableDiff remoteRoutingTableDiff = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + clusterUUID, + compressor, + term, + version + ); + + ActionListener completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse(remoteRoutingTableDiff.getUploadedMetadata()), + ex -> latchedActionListener.onFailure( + new RemoteStateTransferException("Exception in writing index routing diff to remote store", ex) + ) + ); + + remoteRoutingTableDiffStore.writeAsync(remoteRoutingTableDiff, completionListener); + } + /** * Combines IndicesRoutingMetadata from previous manifest and current uploaded indices, removes deleted indices. * @param previousManifest previous manifest, used to get all existing indices routing paths @@ -171,6 +204,22 @@ public void getAsyncIndexRoutingReadAction( remoteIndexRoutingTableStore.readAsync(remoteIndexRoutingTable, actionListener); } + @Override + public void getAsyncIndexRoutingTableDiffReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener latchedActionListener + ) { + ActionListener actionListener = ActionListener.wrap( + latchedActionListener::onResponse, + latchedActionListener::onFailure + ); + + RemoteRoutingTableDiff remoteRoutingTableDiff = new RemoteRoutingTableDiff(uploadedFilename, clusterUUID, compressor); + + remoteRoutingTableDiffStore.readAsync(remoteRoutingTableDiff, actionListener); + } + @Override public List getUpdatedIndexRoutingTableMetadata( List updatedIndicesRouting, @@ -212,6 +261,14 @@ protected void doStart() { ThreadPool.Names.REMOTE_STATE_READ, clusterSettings ); + + this.remoteRoutingTableDiffStore = new RemoteClusterStateBlobStore<>( + new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), + blobStoreRepository, + clusterName, + threadPool, + ThreadPool.Names.REMOTE_STATE_READ + ); } @Override @@ -227,4 +284,14 @@ public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOExcep throw e; } } + + public void deleteStaleIndexRoutingDiffPaths(List stalePaths) throws IOException { + try { + logger.debug(() -> "Deleting stale index routing diff files from remote - " + stalePaths); + blobStoreRepository.blobStore().blobContainer(BlobPath.cleanPath()).deleteBlobsIgnoringIfNotExists(stalePaths); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete some stale index routing diff paths from {}", stalePaths), e); + throw e; + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java index e6e68e01e761f..1ebf3206212a1 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java @@ -9,9 +9,11 @@ package org.opensearch.cluster.routing.remote; import 
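Both async write paths in InternalRemoteRoutingTableService follow the same shape: wrap the caller's latched listener so success reports the uploaded-metadata handle and any failure is re-wrapped as a RemoteStateTransferException. A generic sketch of that adapter shape, using simplified stand-in types rather than the OpenSearch ActionListener API:

// Sketch: adapt a low-level completion callback into the caller's listener,
// mapping success to a result object and failure to one uniform exception.
class ListenerAdapterSketch {
    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    static <T> Listener<Void> adapt(Listener<T> caller, T resultOnSuccess, String failureMessage) {
        return new Listener<Void>() {
            public void onResponse(Void ignored) {
                caller.onResponse(resultOnSuccess); // e.g. the uploaded diff file's metadata
            }

            public void onFailure(Exception e) {
                // stands in for RemoteStateTransferException
                caller.onFailure(new RuntimeException(failureMessage, e));
            }
        };
    }

    public static void main(String[] args) {
        Listener<String> caller = new Listener<String>() {
            public void onResponse(String r) { System.out.println("uploaded: " + r); }
            public void onFailure(Exception e) { System.out.println("failed: " + e.getMessage()); }
        };
        adapt(caller, "routing-diff-file-1", "Exception in writing index routing diff to remote store")
            .onResponse(null); // simulate a successful blob-store write
    }
}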
org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.gateway.remote.ClusterMetadataManifest; @@ -34,7 +36,12 @@ public DiffableUtils.MapDiff> indexRoutingTableDiff, + LatchedActionListener latchedActionListener + ) { + // noop + } + @Override public List getAllUploadedIndicesRouting( ClusterMetadataManifest previousManifest, @@ -67,6 +85,15 @@ public void getAsyncIndexRoutingReadAction( // noop } + @Override + public void getAsyncIndexRoutingTableDiffReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener latchedActionListener + ) { + // noop + } + @Override public List getUpdatedIndexRoutingTableMetadata( List updatedIndicesRouting, @@ -95,4 +122,8 @@ protected void doClose() throws IOException { public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException { // noop } + + public void deleteStaleIndexRoutingDiffPaths(List stalePaths) throws IOException { + // noop + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java index 0b0b4bb7dbc84..0811a5f3010f4 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -9,15 +9,19 @@ package org.opensearch.cluster.routing.remote; import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -27,16 +31,36 @@ * @opensearch.internal */ public interface RemoteRoutingTableService extends LifecycleComponent { - public static final DiffableUtils.NonDiffableValueSerializer CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER = - new DiffableUtils.NonDiffableValueSerializer() { + + public static final DiffableUtils.DiffableValueSerializer CUSTOM_ROUTING_TABLE_DIFFABLE_VALUE_SERIALIZER = + new DiffableUtils.DiffableValueSerializer() { + @Override + public IndexRoutingTable read(StreamInput in, String key) throws IOException { + return IndexRoutingTable.readFrom(in); + } + @Override public void write(IndexRoutingTable value, StreamOutput out) throws IOException { value.writeTo(out); } @Override - public IndexRoutingTable read(StreamInput in, String key) throws IOException { - return IndexRoutingTable.readFrom(in); + public Diff readDiff(StreamInput in, String key) throws IOException { + return IndexRoutingTable.readDiffFrom(in); + } + + @Override + public Diff diff(IndexRoutingTable currentState, IndexRoutingTable previousState) { + List diffs = new ArrayList<>(); + for 
(Map.Entry entry : currentState.getShards().entrySet()) { + Integer index = entry.getKey(); + IndexShardRoutingTable currentShardRoutingTable = entry.getValue(); + IndexShardRoutingTable previousShardRoutingTable = previousState.shard(index); + if (previousShardRoutingTable == null || !previousShardRoutingTable.equals(currentShardRoutingTable)) { + diffs.add(currentShardRoutingTable); + } + } + return new RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff(diffs); } }; @@ -48,6 +72,12 @@ void getAsyncIndexRoutingReadAction( LatchedActionListener latchedActionListener ); + void getAsyncIndexRoutingTableDiffReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener latchedActionListener + ); + List getUpdatedIndexRoutingTableMetadata( List updatedIndicesRouting, List allIndicesRouting @@ -66,6 +96,14 @@ void getAsyncIndexRoutingWriteAction( LatchedActionListener latchedActionListener ); + void getAsyncIndexRoutingDiffWriteAction( + String clusterUUID, + long term, + long version, + Map> indexRoutingTableDiff, + LatchedActionListener latchedActionListener + ); + List getAllUploadedIndicesRouting( ClusterMetadataManifest previousManifest, List indicesRoutingUploaded, @@ -74,4 +112,6 @@ List getAllUploadedIndicesRouting public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException; + public void deleteStaleIndexRoutingDiffPaths(List stalePaths) throws IOException; + } diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 3a66419b1dc20..71815b6ee324c 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -44,6 +44,7 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { public static final int CODEC_V2 = 2; // In Codec V2, there are separate metadata files rather than a single global metadata file, // also we introduce index routing-metadata, diff and other attributes as part of manifest // required for state publication + public static final int CODEC_V3 = 3; // In Codec V3, we have introduced new diff field in diff-manifest's routing_table_diff private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); @@ -109,6 +110,10 @@ private static ClusterMetadataManifest.Builder manifestV2Builder(Object[] fields .clusterStateCustomMetadataMap(clusterStateCustomMetadata(fields)); } + private static ClusterMetadataManifest.Builder manifestV3Builder(Object[] fields) { + return manifestV2Builder(fields); + } + private static long term(Object[] fields) { return (long) fields[0]; } @@ -226,12 +231,18 @@ private static ClusterStateDiffManifest diffManifest(Object[] fields) { fields -> manifestV2Builder(fields).build() ); - private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V2; + private static final ConstructingObjectParser PARSER_V3 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> manifestV3Builder(fields).build() + ); + + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V3; static { declareParser(PARSER_V0, CODEC_V0); declareParser(PARSER_V1, CODEC_V1); declareParser(PARSER_V2, CODEC_V2); + declareParser(PARSER_V3, CODEC_V3); } private static void declareParser(ConstructingObjectParser parser, long 
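The serializer's diff method in this patch keeps only the shard routing tables that are new or differ from the previous state. The selection logic in a self-contained sketch, with integer-keyed strings standing in for shard routing tables:

// Sketch: collect the values of `current` that are absent from, or different
// in, `previous` -- the per-shard selection behind the serializer's diff().
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;

class ShardDiffSketch {
    static List<String> changedShards(Map<Integer, String> current, Map<Integer, String> previous) {
        List<String> diffs = new ArrayList<>();
        for (Map.Entry<Integer, String> entry : current.entrySet()) {
            String before = previous.get(entry.getKey());
            if (!Objects.equals(before, entry.getValue())) {
                diffs.add(entry.getValue()); // shard table added or changed
            }
        }
        return diffs;
    }

    public static void main(String[] args) {
        Map<Integer, String> previous = Map.of(0, "primary-on-A", 1, "primary-on-B");
        Map<Integer, String> current = Map.of(0, "primary-on-A", 1, "primary-on-C");
        System.out.println(changedShards(current, previous)); // [primary-on-C]
    }
}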
codec_version) { @@ -309,7 +320,7 @@ private static void declareParser(ConstructingObjectParser ClusterStateDiffManifest.fromXContent(p), + (p, c) -> ClusterStateDiffManifest.fromXContent(p, codec_version), DIFF_MANIFEST ); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java index aca53c92781e4..ab7fa1fddf4bf 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java @@ -32,8 +32,8 @@ import static org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer.getAbstractInstance; import static org.opensearch.cluster.DiffableUtils.getStringKeySerializer; -import static org.opensearch.cluster.routing.remote.RemoteRoutingTableService.CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V3; /** * Manifest of diff between two cluster states @@ -53,6 +53,7 @@ public class ClusterStateDiffManifest implements ToXContentFragment, Writeable { private static final String METADATA_CUSTOM_DIFF_FIELD = "metadata_custom_diff"; private static final String UPSERTS_FIELD = "upserts"; private static final String DELETES_FIELD = "deletes"; + private static final String DIFF_FIELD = "diff"; private static final String CLUSTER_BLOCKS_UPDATED_FIELD = "cluster_blocks_diff"; private static final String DISCOVERY_NODES_UPDATED_FIELD = "discovery_nodes_diff"; private static final String ROUTING_TABLE_DIFF = "routing_table_diff"; @@ -72,11 +73,17 @@ public class ClusterStateDiffManifest implements ToXContentFragment, Writeable { private final boolean discoveryNodesUpdated; private final List indicesRoutingUpdated; private final List indicesRoutingDeleted; + private String indicesRoutingDiffPath; private final boolean hashesOfConsistentSettingsUpdated; private final List clusterStateCustomUpdated; private final List clusterStateCustomDeleted; - public ClusterStateDiffManifest(ClusterState state, ClusterState previousState) { + public ClusterStateDiffManifest( + ClusterState state, + ClusterState previousState, + DiffableUtils.MapDiff> routingTableIncrementalDiff, + String indicesRoutingDiffPath + ) { fromStateUUID = previousState.stateUUID(); toStateUUID = state.stateUUID(); coordinationMetadataUpdated = !Metadata.isCoordinationMetadataEqual(state.metadata(), previousState.metadata()); @@ -103,17 +110,13 @@ public ClusterStateDiffManifest(ClusterState state, ClusterState previousState) customMetadataUpdated.addAll(customDiff.getUpserts().keySet()); customMetadataDeleted = customDiff.getDeletes(); - DiffableUtils.MapDiff> routingTableDiff = DiffableUtils.diff( - previousState.getRoutingTable().getIndicesRouting(), - state.getRoutingTable().getIndicesRouting(), - DiffableUtils.getStringKeySerializer(), - CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER - ); - indicesRoutingUpdated = new ArrayList<>(); - routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingUpdated.add(k)); - - indicesRoutingDeleted = routingTableDiff.getDeletes(); + indicesRoutingDeleted = new ArrayList<>(); + this.indicesRoutingDiffPath = indicesRoutingDiffPath; + if (routingTableIncrementalDiff != null) { + routingTableIncrementalDiff.getUpserts().forEach((k, v) -> indicesRoutingUpdated.add(k)); + 
indicesRoutingDeleted.addAll(routingTableIncrementalDiff.getDeletes()); + } hashesOfConsistentSettingsUpdated = !state.metadata() .hashesOfConsistentSettings() .equals(previousState.metadata().hashesOfConsistentSettings()); @@ -126,6 +129,7 @@ public ClusterStateDiffManifest(ClusterState state, ClusterState previousState) clusterStateCustomUpdated = new ArrayList<>(clusterStateCustomDiff.getDiffs().keySet()); clusterStateCustomUpdated.addAll(clusterStateCustomDiff.getUpserts().keySet()); clusterStateCustomDeleted = clusterStateCustomDiff.getDeletes(); + List indicie1s = indicesRoutingUpdated; } public ClusterStateDiffManifest( @@ -143,6 +147,7 @@ public ClusterStateDiffManifest( boolean discoveryNodesUpdated, List indicesRoutingUpdated, List indicesRoutingDeleted, + String indicesRoutingDiffPath, boolean hashesOfConsistentSettingsUpdated, List clusterStateCustomUpdated, List clusterStateCustomDeleted @@ -164,6 +169,7 @@ public ClusterStateDiffManifest( this.hashesOfConsistentSettingsUpdated = hashesOfConsistentSettingsUpdated; this.clusterStateCustomUpdated = Collections.unmodifiableList(clusterStateCustomUpdated); this.clusterStateCustomDeleted = Collections.unmodifiableList(clusterStateCustomDeleted); + this.indicesRoutingDiffPath = indicesRoutingDiffPath; } public ClusterStateDiffManifest(StreamInput in) throws IOException { @@ -184,6 +190,7 @@ public ClusterStateDiffManifest(StreamInput in) throws IOException { this.hashesOfConsistentSettingsUpdated = in.readBoolean(); this.clusterStateCustomUpdated = in.readStringList(); this.clusterStateCustomDeleted = in.readStringList(); + this.indicesRoutingDiffPath = in.readString(); } @Override @@ -237,6 +244,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(index); } builder.endArray(); + if (indicesRoutingDiffPath != null) { + builder.field(DIFF_FIELD, indicesRoutingDiffPath); + } builder.endObject(); builder.startObject(CLUSTER_STATE_CUSTOM_DIFF_FIELD); builder.startArray(UPSERTS_FIELD); @@ -253,7 +263,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static ClusterStateDiffManifest fromXContent(XContentParser parser) throws IOException { + public static ClusterStateDiffManifest fromXContent(XContentParser parser, long codec_version) throws IOException { Builder builder = new Builder(); if (parser.currentToken() == null) { // fresh parser? 
move to next token parser.nextToken(); @@ -341,6 +351,11 @@ public static ClusterStateDiffManifest fromXContent(XContentParser parser) throw case DELETES_FIELD: builder.indicesRoutingDeleted(convertListToString(parser.listOrderedMap())); break; + case DIFF_FIELD: + if (codec_version >= CODEC_V3) { + builder.indicesRoutingDiffPath(parser.textOrNull()); + } + break; default: throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); } @@ -456,6 +471,10 @@ public List getIndicesRoutingUpdated() { return indicesRoutingUpdated; } + public String getIndicesRoutingDiffPath() { + return indicesRoutingDiffPath; + } + public List getIndicesRoutingDeleted() { return indicesRoutingDeleted; } @@ -468,6 +487,10 @@ public List getClusterStateCustomDeleted() { return clusterStateCustomDeleted; } + public void setIndicesRoutingDiffPath(String indicesRoutingDiffPath) { + this.indicesRoutingDiffPath = indicesRoutingDiffPath; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -489,7 +512,8 @@ public boolean equals(Object o) { && Objects.equals(indicesRoutingUpdated, that.indicesRoutingUpdated) && Objects.equals(indicesRoutingDeleted, that.indicesRoutingDeleted) && Objects.equals(clusterStateCustomUpdated, that.clusterStateCustomUpdated) - && Objects.equals(clusterStateCustomDeleted, that.clusterStateCustomDeleted); + && Objects.equals(clusterStateCustomDeleted, that.clusterStateCustomDeleted) + && Objects.equals(indicesRoutingDiffPath, that.indicesRoutingDiffPath); } @Override @@ -538,6 +562,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hashesOfConsistentSettingsUpdated); out.writeStringCollection(clusterStateCustomUpdated); out.writeStringCollection(clusterStateCustomDeleted); + out.writeString(indicesRoutingDiffPath); } /** @@ -560,6 +585,7 @@ public static class Builder { private boolean discoveryNodesUpdated; private List indicesRoutingUpdated; private List indicesRoutingDeleted; + private String indicesRoutingDiff; private boolean hashesOfConsistentSettingsUpdated; private List clusterStateCustomUpdated; private List clusterStateCustomDeleted; @@ -650,6 +676,11 @@ public Builder indicesRoutingDeleted(List indicesRoutingDeleted) { return this; } + public Builder indicesRoutingDiffPath(String indicesRoutingDiffPath) { + this.indicesRoutingDiff = indicesRoutingDiffPath; + return this; + } + public Builder clusterStateCustomUpdated(List clusterStateCustomUpdated) { this.clusterStateCustomUpdated = clusterStateCustomUpdated; return this; @@ -676,6 +707,7 @@ public ClusterStateDiffManifest build() { discoveryNodesUpdated, indicesRoutingUpdated, indicesRoutingDeleted, + indicesRoutingDiff, hashesOfConsistentSettingsUpdated, clusterStateCustomUpdated, clusterStateCustomDeleted diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java index 99235bc96bfe3..8691187c7fbfa 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java @@ -179,6 +179,7 @@ void deleteClusterMetadata( Set staleGlobalMetadataPaths = new HashSet<>(); Set staleEphemeralAttributePaths = new HashSet<>(); Set staleIndexRoutingPaths = new HashSet<>(); + Set staleIndexRoutingDiffPaths = new HashSet<>(); activeManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest 
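The DIFF_FIELD case shows the codec-gating pattern that CODEC_V3 introduces: a field added in a newer codec is only honoured when the manifest was written with that codec or later, so pre-V3 manifests still parse cleanly. A minimal sketch of that gate:

// Sketch: only accept fields that existed in the codec version the document
// was written with; fields unknown to that version stay null.
class CodecGatedParseSketch {
    static final long CODEC_V2 = 2;
    static final long CODEC_V3 = 3;

    static String parseDiffPath(long codecVersion, String rawFieldValue) {
        if (codecVersion >= CODEC_V3) {
            return rawFieldValue; // routing_table_diff.diff exists from V3 onwards
        }
        return null; // pre-V3 manifests never wrote this field
    }

    public static void main(String[] args) {
        System.out.println(parseDiffPath(CODEC_V3, "cluster-uuid/routing-diff/1")); // path accepted
        System.out.println(parseDiffPath(CODEC_V2, "cluster-uuid/routing-diff/1")); // null
    }
}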
clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( clusterName, @@ -222,6 +223,10 @@ void deleteClusterMetadata( clusterMetadataManifest.getIndicesRouting() .forEach(uploadedIndicesRouting -> filesToKeep.add(uploadedIndicesRouting.getUploadedFilename())); } + if (clusterMetadataManifest.getDiffManifest() != null + && clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath() != null) { + filesToKeep.add(clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath()); + } }); staleManifestBlobMetadata.forEach(blobMetadata -> { ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( @@ -264,6 +269,18 @@ void deleteClusterMetadata( } }); } + if (clusterMetadataManifest.getDiffManifest() != null + && clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath() != null) { + if (!filesToKeep.contains(clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath())) { + staleIndexRoutingDiffPaths.add(clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath()); + logger.debug( + () -> new ParameterizedMessage( + "Indices routing diff paths in stale manifest: {}", + clusterMetadataManifest.getDiffManifest().getIndicesRoutingDiffPath() + ) + ); + } + } clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { String fileName = RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()); @@ -316,6 +333,15 @@ void deleteClusterMetadata( ); remoteStateStats.indexRoutingFilesCleanupAttemptFailed(); } + try { + remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(new ArrayList<>(staleIndexRoutingDiffPaths)); + } catch (IOException e) { + logger.error( + () -> new ParameterizedMessage("Error while deleting stale index routing diff files {}", staleIndexRoutingDiffPaths), + e + ); + remoteStateStats.indicesRoutingDiffFileCleanupAttemptFailed(); + } } catch (IllegalStateException e) { logger.error("Error while fetching Remote Cluster Metadata manifests", e); } catch (IOException e) { diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index b34641f77f607..674279f2251bd 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -14,6 +14,7 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -26,6 +27,7 @@ import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableServiceFactory; import org.opensearch.cluster.service.ClusterService; @@ -56,6 +58,7 @@ import org.opensearch.gateway.remote.model.RemoteReadResult; import org.opensearch.gateway.remote.model.RemoteTemplatesMetadata; import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; +import 
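The cleanup flow in deleteClusterMetadata is a keep-set computation: paths referenced by any active manifest are collected first, and a stale manifest's routing-diff path is scheduled for deletion only when no active manifest still references it. A compact sketch of that retention pattern:

// Sketch: delete only blobs that stale manifests reference and no active
// manifest does.
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class StaleBlobSketch {
    static Set<String> toDelete(List<String> activeRefs, List<String> staleRefs) {
        Set<String> keep = new HashSet<>(activeRefs);
        Set<String> stale = new HashSet<>();
        for (String path : staleRefs) {
            if (!keep.contains(path)) {
                stale.add(path); // unreferenced by any active manifest -> safe to delete
            }
        }
        return stale;
    }

    public static void main(String[] args) {
        System.out.println(toDelete(List.of("routing-diff/2"), List.of("routing-diff/1", "routing-diff/2")));
        // [routing-diff/1] -- routing-diff/2 is still referenced, so it is kept
    }
}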
org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; @@ -234,13 +237,21 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat isPublicationEnabled, isPublicationEnabled ? clusterState.customs() : Collections.emptyMap(), isPublicationEnabled, - remoteRoutingTableService.getIndicesRouting(clusterState.getRoutingTable()) + remoteRoutingTableService.getIndicesRouting(clusterState.getRoutingTable()), + null + ); + + ClusterStateDiffManifest clusterStateDiffManifest = new ClusterStateDiffManifest( + clusterState, + ClusterState.EMPTY_STATE, + null, + null ); final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, uploadedMetadataResults, previousClusterUUID, - new ClusterStateDiffManifest(clusterState, ClusterState.EMPTY_STATE), + clusterStateDiffManifest, false ); @@ -330,10 +341,13 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( indicesToBeDeletedFromRemote.remove(indexMetadata.getIndex().getName()); } - final DiffableUtils.MapDiff> routingTableDiff = remoteRoutingTableService - .getIndicesRoutingMapDiff(previousClusterState.getRoutingTable(), clusterState.getRoutingTable()); final List indicesRoutingToUpload = new ArrayList<>(); - routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingToUpload.add(v)); + final DiffableUtils.MapDiff> routingTableIncrementalDiff = + remoteRoutingTableService.getIndicesRoutingMapDiff(previousClusterState.getRoutingTable(), clusterState.getRoutingTable()); + + Map> indexRoutingTableDiffs = routingTableIncrementalDiff.getDiffs(); + routingTableIncrementalDiff.getDiffs().forEach((k, v) -> indicesRoutingToUpload.add(clusterState.getRoutingTable().index(k))); + routingTableIncrementalDiff.getUpserts().forEach((k, v) -> indicesRoutingToUpload.add(v)); UploadedMetadataResults uploadedMetadataResults; // For migration case from codec V0 or V1 to V2, we have added null check on metadata attribute files, @@ -369,7 +383,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( updateTransientSettingsMetadata, clusterStateCustomsDiff.getUpserts(), updateHashesOfConsistentSettings, - indicesRoutingToUpload + indicesRoutingToUpload, + indexRoutingTableDiffs ); // update the map if the metadata was uploaded @@ -411,14 +426,23 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( uploadedMetadataResults.uploadedIndicesRoutingMetadata = remoteRoutingTableService.getAllUploadedIndicesRouting( previousManifest, uploadedMetadataResults.uploadedIndicesRoutingMetadata, - routingTableDiff.getDeletes() + routingTableIncrementalDiff.getDeletes() + ); + + ClusterStateDiffManifest clusterStateDiffManifest = new ClusterStateDiffManifest( + clusterState, + previousClusterState, + routingTableIncrementalDiff, + uploadedMetadataResults.uploadedIndicesRoutingDiffMetadata != null + ? 
uploadedMetadataResults.uploadedIndicesRoutingDiffMetadata.getUploadedFilename() + : null ); final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, uploadedMetadataResults, previousManifest.getPreviousClusterUUID(), - new ClusterStateDiffManifest(clusterState, previousClusterState), + clusterStateDiffManifest, false ); @@ -488,13 +512,15 @@ UploadedMetadataResults writeMetadataInParallel( boolean uploadTransientSettingMetadata, Map clusterStateCustomToUpload, boolean uploadHashesOfConsistentSettings, - List indicesRoutingToUpload + List indicesRoutingToUpload, + Map> indexRoutingTableDiff ) throws IOException { assert Objects.nonNull(indexMetadataUploadListeners) : "indexMetadataUploadListeners can not be null"; int totalUploadTasks = indexToUpload.size() + indexMetadataUploadListeners.size() + customToUpload.size() + (uploadCoordinationMetadata ? 1 : 0) + (uploadSettingsMetadata ? 1 : 0) + (uploadTemplateMetadata ? 1 : 0) + (uploadDiscoveryNodes ? 1 : 0) + (uploadClusterBlock ? 1 : 0) + (uploadTransientSettingMetadata ? 1 : 0) - + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ? 1 : 0) + indicesRoutingToUpload.size(); + + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ? 1 : 0) + indicesRoutingToUpload.size() + + (indexRoutingTableDiff != null && !indexRoutingTableDiff.isEmpty() ? 1 : 0); CountDownLatch latch = new CountDownLatch(totalUploadTasks); List uploadTasks = Collections.synchronizedList(new ArrayList<>(totalUploadTasks)); Map results = new ConcurrentHashMap<>(totalUploadTasks); @@ -664,6 +690,16 @@ UploadedMetadataResults writeMetadataInParallel( listener ); }); + if (indexRoutingTableDiff != null && !indexRoutingTableDiff.isEmpty()) { + uploadTasks.add(RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_FILE); + remoteRoutingTableService.getAsyncIndexRoutingDiffWriteAction( + clusterState.metadata().clusterUUID(), + clusterState.term(), + clusterState.version(), + indexRoutingTableDiff, + listener + ); + } invokeIndexMetadataUploadListeners(indexToUpload, prevIndexMetadataByName, latch, exceptionList); try { @@ -710,6 +746,8 @@ UploadedMetadataResults writeMetadataInParallel( if (uploadedMetadata.getClass().equals(UploadedIndexMetadata.class) && uploadedMetadata.getComponent().contains(INDEX_ROUTING_METADATA_PREFIX)) { response.uploadedIndicesRoutingMetadata.add((UploadedIndexMetadata) uploadedMetadata); + } else if (RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_FILE.equals(name)) { + response.uploadedIndicesRoutingDiffMetadata = (UploadedMetadataAttribute) uploadedMetadata; } else if (name.startsWith(CUSTOM_METADATA)) { // component name for custom metadata will look like custom-- String custom = name.split(DELIMITER)[0].split(CUSTOM_DELIMITER)[1]; @@ -979,16 +1017,18 @@ ClusterState readClusterStateInParallel( List indicesRoutingToRead, boolean readHashesOfConsistentSettings, Map clusterStateCustomToRead, + boolean readIndexRoutingTableDiff, boolean includeEphemeral ) throws IOException { int totalReadTasks = indicesToRead.size() + customToRead.size() + (readCoordinationMetadata ? 1 : 0) + (readSettingsMetadata ? 1 : 0) + (readTemplatesMetadata ? 1 : 0) + (readDiscoveryNodes ? 1 : 0) + (readClusterBlocks ? 1 : 0) + (readTransientSettingsMetadata ? 1 : 0) + (readHashesOfConsistentSettings ? 1 : 0) + clusterStateCustomToRead.size() - + indicesRoutingToRead.size(); + + indicesRoutingToRead.size() + (readIndexRoutingTableDiff ? 
1 : 0); CountDownLatch latch = new CountDownLatch(totalReadTasks); List readResults = Collections.synchronizedList(new ArrayList<>()); List readIndexRoutingTableResults = Collections.synchronizedList(new ArrayList<>()); + AtomicReference readIndexRoutingTableDiffResults = new AtomicReference<>(); List exceptionList = Collections.synchronizedList(new ArrayList<>(totalReadTasks)); LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(response -> { @@ -1031,6 +1071,25 @@ ClusterState readClusterStateInParallel( ); } + LatchedActionListener routingTableDiffLatchedActionListener = new LatchedActionListener<>( + ActionListener.wrap(response -> { + logger.debug("Successfully read routing table diff component from remote"); + readIndexRoutingTableDiffResults.set(response); + }, ex -> { + logger.error("Failed to read routing table diff from remote", ex); + exceptionList.add(ex); + }), + latch + ); + + if (readIndexRoutingTableDiff) { + remoteRoutingTableService.getAsyncIndexRoutingTableDiffReadAction( + clusterUUID, + manifest.getDiffManifest().getIndicesRoutingDiffPath(), + routingTableDiffLatchedActionListener + ); + } + for (Map.Entry entry : customToRead.entrySet()) { remoteGlobalMetadataManager.readAsync( entry.getValue().getAttributeName(), @@ -1233,6 +1292,14 @@ ClusterState readClusterStateInParallel( readIndexRoutingTableResults.forEach( indexRoutingTable -> indicesRouting.put(indexRoutingTable.getIndex().getName(), indexRoutingTable) ); + RoutingTableIncrementalDiff routingTableDiff = readIndexRoutingTableDiffResults.get(); + if (routingTableDiff != null) { + routingTableDiff.getDiffs().forEach((key, diff) -> { + IndexRoutingTable previousIndexRoutingTable = indicesRouting.get(key); + IndexRoutingTable updatedTable = diff.apply(previousIndexRoutingTable); + indicesRouting.put(key, updatedTable); + }); + } clusterStateBuilder.routingTable(new RoutingTable(manifest.getRoutingTableVersion(), indicesRouting)); return clusterStateBuilder.build(); @@ -1261,6 +1328,7 @@ public ClusterState getClusterStateForManifest( includeEphemeral ? manifest.getIndicesRouting() : emptyList(), includeEphemeral && manifest.getHashesOfConsistentSettings() != null, includeEphemeral ? 
manifest.getClusterStateCustomMap() : emptyMap(), + false, includeEphemeral ); } else { @@ -1281,6 +1349,7 @@ public ClusterState getClusterStateForManifest( emptyList(), false, emptyMap(), + false, false ); Metadata.Builder mb = Metadata.builder(remoteGlobalMetadataManager.getGlobalMetadata(manifest.getClusterUUID(), manifest)); @@ -1337,6 +1406,9 @@ public ClusterState getClusterStateUsingDiff(ClusterMetadataManifest manifest, C updatedIndexRouting, diff.isHashesOfConsistentSettingsUpdated(), updatedClusterStateCustom, + manifest.getDiffManifest() != null + && manifest.getDiffManifest().getIndicesRoutingDiffPath() != null + && !manifest.getDiffManifest().getIndicesRoutingDiffPath().isEmpty(), true ); ClusterState.Builder clusterStateBuilder = ClusterState.builder(updatedClusterState); diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java index f2b93c3784407..74cb838286961 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java @@ -88,6 +88,7 @@ public static class UploadedMetadataResults { ClusterMetadataManifest.UploadedMetadataAttribute uploadedClusterBlocks; List uploadedIndicesRoutingMetadata; ClusterMetadataManifest.UploadedMetadataAttribute uploadedHashesOfConsistentSettings; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedIndicesRoutingDiffMetadata; public UploadedMetadataResults( List uploadedIndexMetadata, diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java index 36d107a99d258..efd73e11e46b5 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -20,15 +20,18 @@ public class RemotePersistenceStats extends PersistedStateStats { static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count"; static final String INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT = "index_routing_files_cleanup_attempt_failed_count"; + static final String INDICES_ROUTING_DIFF_FILES_CLEANUP_ATTEMPT_FAILED_COUNT = "indices_routing_diff_files_cleanup_attempt_failed_count"; static final String REMOTE_UPLOAD = "remote_upload"; private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0); private AtomicLong indexRoutingFilesCleanupAttemptFailedCount = new AtomicLong(0); + private AtomicLong indicesRoutingDiffFilesCleanupAttemptFailedCount = new AtomicLong(0); public RemotePersistenceStats() { super(REMOTE_UPLOAD); addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount); addToExtendedFields(INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT, indexRoutingFilesCleanupAttemptFailedCount); + addToExtendedFields(INDICES_ROUTING_DIFF_FILES_CLEANUP_ATTEMPT_FAILED_COUNT, indicesRoutingDiffFilesCleanupAttemptFailedCount); } public void cleanUpAttemptFailed() { @@ -46,4 +49,12 @@ public void indexRoutingFilesCleanupAttemptFailed() { public long getIndexRoutingFilesCleanupAttemptFailedCount() { return indexRoutingFilesCleanupAttemptFailedCount.get(); } + + public void indicesRoutingDiffFileCleanupAttemptFailed() { + indicesRoutingDiffFilesCleanupAttemptFailedCount.incrementAndGet(); + } + + public long getIndicesRoutingDiffFileCleanupAttemptFailedCount() { + return 
indicesRoutingDiffFilesCleanupAttemptFailedCount.get(); + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java index 1dc56712d4ab5..acaae3173315a 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java @@ -35,7 +35,7 @@ public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEnt public static final int SPLITTED_MANIFEST_FILE_LENGTH = 6; public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; - public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V2; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V3; public static final String COMMITTED = "C"; public static final String PUBLISHED = "P"; @@ -50,6 +50,9 @@ public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEnt public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V1 = new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV1); + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V2 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV2); + /** * Manifest format compatible with codec v2, where we introduced codec versions/global metadata. */ @@ -149,6 +152,8 @@ private ChecksumBlobStoreFormat getClusterMetadataManif long codecVersion = getManifestCodecVersion(); if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { return CLUSTER_METADATA_MANIFEST_FORMAT; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V2) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V2; } else if (codecVersion == ClusterMetadataManifest.CODEC_V1) { return CLUSTER_METADATA_MANIFEST_FORMAT_V1; } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { diff --git a/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java new file mode 100644 index 0000000000000..e876d939490d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/routingtable/RemoteRoutingTableDiff.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.routingtable; + +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * Represents an incremental difference between {@link org.opensearch.cluster.routing.RoutingTable} objects that can be serialized and deserialized. + * This class is responsible for writing and reading the differences between RoutingTables to and from an input/output stream. + */ +public class RemoteRoutingTableDiff extends AbstractRemoteWritableBlobEntity { + private final RoutingTableIncrementalDiff routingTableIncrementalDiff; + + private long term; + private long version; + + public static final String ROUTING_TABLE_DIFF = "routing-table-diff"; + + public static final String ROUTING_TABLE_DIFF_METADATA_PREFIX = "routingTableDiff--"; + + public static final String ROUTING_TABLE_DIFF_FILE = "routing_table_diff"; + private static final String codec = "RemoteRoutingTableDiff"; + public static final String ROUTING_TABLE_DIFF_PATH_TOKEN = "routing-table-diff"; + + public static final int VERSION = 1; + + public static final ChecksumWritableBlobStoreFormat REMOTE_ROUTING_TABLE_DIFF_FORMAT = + new ChecksumWritableBlobStoreFormat<>(codec, RoutingTableIncrementalDiff::readFrom); + + /** + * Constructs a new RemoteRoutingTableDiff with the given differences. + * + * @param routingTableIncrementalDiff a RoutingTableIncrementalDiff object containing the differences of {@link IndexRoutingTable}. + * @param clusterUUID the cluster UUID. + * @param compressor the compressor to be used. + * @param term the term of the routing table. + * @param version the version of the routing table. + */ + public RemoteRoutingTableDiff( + RoutingTableIncrementalDiff routingTableIncrementalDiff, + String clusterUUID, + Compressor compressor, + long term, + long version + ) { + super(clusterUUID, compressor); + this.routingTableIncrementalDiff = routingTableIncrementalDiff; + this.term = term; + this.version = version; + } + + /** + * Constructs a new RemoteRoutingTableDiff with the given differences. + * + * @param routingTableIncrementalDiff a RoutingTableIncrementalDiff object containing the differences of {@link IndexRoutingTable}. + * @param clusterUUID the cluster UUID. + * @param compressor the compressor to be used. + */ + public RemoteRoutingTableDiff(RoutingTableIncrementalDiff routingTableIncrementalDiff, String clusterUUID, Compressor compressor) { + super(clusterUUID, compressor); + this.routingTableIncrementalDiff = routingTableIncrementalDiff; + } + + /** + * Constructs a new RemoteRoutingTableDiff with the given blob name, cluster UUID, and compressor. + * + * @param blobName the name of the blob. + * @param clusterUUID the cluster UUID. + * @param compressor the compressor to be used. 
+ */ + public RemoteRoutingTableDiff(String blobName, String clusterUUID, Compressor compressor) { + super(clusterUUID, compressor); + this.routingTableIncrementalDiff = null; + this.blobName = blobName; + } + + /** + * Gets the map of differences of {@link IndexRoutingTable}. + * + * @return a map containing the differences. + */ + public Map> getDiffs() { + assert routingTableIncrementalDiff != null; + return routingTableIncrementalDiff.getDiffs(); + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(ROUTING_TABLE_DIFF_PATH_TOKEN), ROUTING_TABLE_DIFF_METADATA_PREFIX); + } + + @Override + public String getType() { + return ROUTING_TABLE_DIFF; + } + + @Override + public String generateBlobFileName() { + if (blobFileName == null) { + blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + RemoteStoreUtils.invertLong(System.currentTimeMillis()) + ); + } + return blobFileName; + } + + @Override + public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new ClusterMetadataManifest.UploadedMetadataAttribute(ROUTING_TABLE_DIFF_FILE, blobName); + } + + @Override + public InputStream serialize() throws IOException { + assert routingTableIncrementalDiff != null; + return REMOTE_ROUTING_TABLE_DIFF_FORMAT.serialize(routingTableIncrementalDiff, generateBlobFileName(), getCompressor()) + .streamInput(); + } + + @Override + public RoutingTableIncrementalDiff deserialize(InputStream in) throws IOException { + return REMOTE_ROUTING_TABLE_DIFF_FORMAT.deserialize(blobName, Streams.readFully(in)); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java index f66e096e9b548..74254f1a1987f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java @@ -12,12 +12,15 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; @@ -50,8 +53,11 @@ import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -69,6 +75,10 @@ import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_METADATA_PREFIX; import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE; import static 
org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE_FORMAT; +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.REMOTE_ROUTING_TABLE_DIFF_FORMAT; +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_FILE; +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_METADATA_PREFIX; +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_PATH_TOKEN; import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -281,10 +291,14 @@ public void testGetIndicesRoutingMapDiffShardChanged() { DiffableUtils.MapDiff> diff = remoteRoutingTableService .getIndicesRoutingMapDiff(routingTable, routingTable2); - assertEquals(1, diff.getUpserts().size()); - assertNotNull(diff.getUpserts().get(indexName)); - assertEquals(noOfShards + 1, diff.getUpserts().get(indexName).getShards().size()); - assertEquals(noOfReplicas + 1, diff.getUpserts().get(indexName).getShards().get(0).getSize()); + assertEquals(0, diff.getUpserts().size()); + assertEquals(1, diff.getDiffs().size()); + assertNotNull(diff.getDiffs().get(indexName)); + assertEquals(noOfShards + 1, diff.getDiffs().get(indexName).apply(routingTable.indicesRouting().get(indexName)).shards().size()); + assertEquals( + noOfReplicas + 1, + diff.getDiffs().get(indexName).apply(routingTable.indicesRouting().get(indexName)).getShards().get(0).getSize() + ); assertEquals(0, diff.getDeletes().size()); final IndexMetadata indexMetadata3 = new IndexMetadata.Builder(indexName).settings( @@ -296,11 +310,14 @@ public void testGetIndicesRoutingMapDiffShardChanged() { RoutingTable routingTable3 = RoutingTable.builder().addAsNew(indexMetadata3).build(); diff = remoteRoutingTableService.getIndicesRoutingMapDiff(routingTable2, routingTable3); - assertEquals(1, diff.getUpserts().size()); - assertNotNull(diff.getUpserts().get(indexName)); - assertEquals(noOfShards + 1, diff.getUpserts().get(indexName).getShards().size()); - assertEquals(noOfReplicas + 2, diff.getUpserts().get(indexName).getShards().get(0).getSize()); - + assertEquals(0, diff.getUpserts().size()); + assertEquals(1, diff.getDiffs().size()); + assertNotNull(diff.getDiffs().get(indexName)); + assertEquals(noOfShards + 1, diff.getDiffs().get(indexName).apply(routingTable.indicesRouting().get(indexName)).shards().size()); + assertEquals( + noOfReplicas + 2, + diff.getDiffs().get(indexName).apply(routingTable.indicesRouting().get(indexName)).getShards().get(0).getSize() + ); assertEquals(0, diff.getDeletes().size()); } @@ -320,10 +337,10 @@ public void testGetIndicesRoutingMapDiffShardDetailChanged() { DiffableUtils.MapDiff> diff = remoteRoutingTableService .getIndicesRoutingMapDiff(routingTable, routingTable2); - assertEquals(1, diff.getUpserts().size()); - assertNotNull(diff.getUpserts().get(indexName)); - assertEquals(noOfShards, diff.getUpserts().get(indexName).getShards().size()); - assertEquals(noOfReplicas + 1, diff.getUpserts().get(indexName).getShards().get(0).getSize()); + assertEquals(1, diff.getDiffs().size()); + assertNotNull(diff.getDiffs().get(indexName)); + assertEquals(noOfShards, 
diff.getDiffs().get(indexName).apply(routingTable.indicesRouting().get(indexName)).shards().size()); + assertEquals(0, diff.getUpserts().size()); assertEquals(0, diff.getDeletes().size()); } @@ -552,6 +569,44 @@ public void testGetAsyncIndexRoutingReadAction() throws Exception { assertEquals(clusterState.getRoutingTable().getIndicesRouting().get(indexName), indexRoutingTable); } + public void testGetAsyncIndexRoutingTableDiffReadAction() throws Exception { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + ClusterState currentState = createClusterState(indexName); + + // Get the IndexRoutingTable from the current state + IndexRoutingTable indexRoutingTable = currentState.routingTable().index(indexName); + Map shardRoutingTables = indexRoutingTable.getShards(); + + RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff indexRoutingTableDiff = + new RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff(new ArrayList<>(shardRoutingTables.values())); + + // Create the map for RoutingTableIncrementalDiff + Map> diffs = new HashMap<>(); + diffs.put(indexName, indexRoutingTableDiff); + + RoutingTableIncrementalDiff diff = new RoutingTableIncrementalDiff(diffs); + + String uploadedFileName = String.format(Locale.ROOT, "routing-table-diff/" + indexName); + when(blobContainer.readBlob(indexName)).thenReturn( + REMOTE_ROUTING_TABLE_DIFF_FORMAT.serialize(diff, uploadedFileName, compressor).streamInput() + ); + + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteRoutingTableService.getAsyncIndexRoutingTableDiffReadAction( + "cluster-uuid", + uploadedFileName, + new LatchedActionListener<>(listener, latch) + ); + latch.await(); + + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + RoutingTableIncrementalDiff resultDiff = listener.getResult(); + assertEquals(diff.getDiffs().size(), resultDiff.getDiffs().size()); + } + public void testGetAsyncIndexRoutingWriteAction() throws Exception { String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); ClusterState clusterState = createClusterState(indexName); @@ -604,6 +659,68 @@ public void testGetAsyncIndexRoutingWriteAction() throws Exception { assertThat(RemoteStoreUtils.invertLong(fileNameTokens[3]), lessThanOrEqualTo(System.currentTimeMillis())); } + public void testGetAsyncIndexRoutingDiffWriteAction() throws Exception { + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + ClusterState currentState = createClusterState(indexName); + + // Get the IndexRoutingTable from the current state + IndexRoutingTable indexRoutingTable = currentState.routingTable().index(indexName); + Map shardRoutingTables = indexRoutingTable.getShards(); + + RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff indexRoutingTableDiff = + new RoutingTableIncrementalDiff.IndexRoutingTableIncrementalDiff(new ArrayList<>(shardRoutingTables.values())); + + // Create the map for RoutingTableIncrementalDiff + Map> diffs = new HashMap<>(); + diffs.put(indexName, indexRoutingTableDiff); + + // RoutingTableIncrementalDiff diff = new RoutingTableIncrementalDiff(diffs); + + Iterable remotePath = new BlobPath().add("base-path") + .add( + Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(currentState.getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add("cluster-state") + .add(currentState.metadata().clusterUUID()) + .add(ROUTING_TABLE_DIFF_PATH_TOKEN); + + doAnswer(invocationOnMock -> { + 
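+ // Stub for blobStoreTransferService.uploadBlob (wired via the .when(...) below): resolve the
+ // ActionListener passed as the fifth argument (index 4) with a successful null response, so the
+ // LatchedActionListener created by the diff write action can count down.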
invocationOnMock.getArgument(4, ActionListener.class).onResponse(null); + return null; + }).when(blobStoreTransferService) + .uploadBlob(any(InputStream.class), eq(remotePath), anyString(), eq(WritePriority.URGENT), any(ActionListener.class)); + + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteRoutingTableService.getAsyncIndexRoutingDiffWriteAction( + currentState.metadata().clusterUUID(), + currentState.term(), + currentState.version(), + diffs, + new LatchedActionListener<>(listener, latch) + ); + latch.await(); + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + ClusterMetadataManifest.UploadedMetadata uploadedMetadata = listener.getResult(); + + assertEquals(ROUTING_TABLE_DIFF_FILE, uploadedMetadata.getComponent()); + String uploadedFileName = uploadedMetadata.getUploadedFilename(); + String[] pathTokens = uploadedFileName.split(PATH_DELIMITER); + assertEquals(6, pathTokens.length); + assertEquals(pathTokens[0], "base-path"); + String[] fileNameTokens = pathTokens[5].split(DELIMITER); + + assertEquals(4, fileNameTokens.length); + assertEquals(ROUTING_TABLE_DIFF_METADATA_PREFIX, fileNameTokens[0]); + assertEquals(RemoteStoreUtils.invertLong(1L), fileNameTokens[1]); + assertEquals(RemoteStoreUtils.invertLong(2L), fileNameTokens[2]); + assertThat(RemoteStoreUtils.invertLong(fileNameTokens[3]), lessThanOrEqualTo(System.currentTimeMillis())); + } + public void testGetUpdatedIndexRoutingTableMetadataWhenNoChange() { List updatedIndicesRouting = new ArrayList<>(); List indicesRouting = randomUploadedIndexMetadataList(); @@ -687,4 +804,26 @@ public void testDeleteStaleIndexRoutingPathsThrowsIOException() throws IOExcepti verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths); } + public void testDeleteStaleIndexRoutingDiffPaths() throws IOException { + doNothing().when(blobContainer).deleteBlobsIgnoringIfNotExists(any()); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + List stalePaths = Arrays.asList("path1", "path2"); + remoteRoutingTableService.doStart(); + remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(stalePaths); + verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths); + } + + public void testDeleteStaleIndexRoutingDiffPathsThrowsIOException() throws IOException { + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + List stalePaths = Arrays.asList("path1", "path2"); + // Simulate an IOException + doThrow(new IOException("test exception")).when(blobContainer).deleteBlobsIgnoringIfNotExists(Mockito.anyList()); + + remoteRoutingTableService.doStart(); + IOException thrown = assertThrows(IOException.class, () -> { + remoteRoutingTableService.deleteStaleIndexRoutingDiffPaths(stalePaths); + }); + assertEquals("test exception", thrown.getMessage()); + verify(blobContainer).deleteBlobsIgnoringIfNotExists(stalePaths); + } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 256161af1a3e2..8a6dd6bc96e72 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -10,9 +10,11 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.metadata.IndexGraveyard; import 
org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -29,9 +31,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; +import org.mockito.Mockito; + import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V0; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; @@ -157,7 +162,7 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { .opensearchVersion(Version.CURRENT) .nodeId("B10RX1f5RJenMQvYccCgSQ") .committed(true) - .codecVersion(ClusterMetadataManifest.CODEC_V2) + .codecVersion(ClusterMetadataManifest.CODEC_V3) .indices(randomUploadedIndexMetadataList()) .previousClusterUUID("yfObdx8KSMKKrXf8UyHhM") .clusterUUIDCommitted(true) @@ -191,7 +196,9 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { .diffManifest( new ClusterStateDiffManifest( RemoteClusterStateServiceTests.generateClusterStateWithOneIndex().build(), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null, + "indicesRoutingDiffPath" ) ) .build(); @@ -523,7 +530,75 @@ public void testClusterMetadataManifestXContentV2() throws IOException { .diffManifest( new ClusterStateDiffManifest( RemoteClusterStateServiceTests.generateClusterStateWithOneIndex().build(), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null, + null + ) + ) + .build(); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContent(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + + public void testClusterMetadataManifestXContentV3() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + UploadedMetadataAttribute uploadedMetadataAttribute = new UploadedMetadataAttribute("attribute_name", "testing_attribute"); + final DiffableUtils.MapDiff> routingTableIncrementalDiff = Mockito.mock( + DiffableUtils.MapDiff.class + ); + ClusterMetadataManifest originalManifest = ClusterMetadataManifest.builder() + .clusterTerm(1L) + .stateVersion(1L) + .clusterUUID("test-cluster-uuid") + .stateUUID("test-state-uuid") + .opensearchVersion(Version.CURRENT) + .nodeId("test-node-id") + .committed(false) + .codecVersion(ClusterMetadataManifest.CODEC_V3) + .indices(Collections.singletonList(uploadedIndexMetadata)) + .previousClusterUUID("prev-cluster-uuid") + .clusterUUIDCommitted(true) + .coordinationMetadata(uploadedMetadataAttribute) + .settingMetadata(uploadedMetadataAttribute) + .templatesMetadata(uploadedMetadataAttribute) + .customMetadataMap( + Collections.unmodifiableList( + Arrays.asList( + new UploadedMetadataAttribute( + CUSTOM_METADATA + CUSTOM_DELIMITER + RepositoriesMetadata.TYPE, + "custom--repositories-file" + ), + new 
UploadedMetadataAttribute( + CUSTOM_METADATA + CUSTOM_DELIMITER + IndexGraveyard.TYPE, + "custom--index_graveyard-file" + ), + new UploadedMetadataAttribute( + CUSTOM_METADATA + CUSTOM_DELIMITER + WeightedRoutingMetadata.TYPE, + "custom--weighted_routing_netadata-file" + ) + ) + ).stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())) + ) + .routingTableVersion(1L) + .indicesRouting(Collections.singletonList(uploadedIndexMetadata)) + .discoveryNodesMetadata(uploadedMetadataAttribute) + .clusterBlocksMetadata(uploadedMetadataAttribute) + .transientSettingsMetadata(uploadedMetadataAttribute) + .hashesOfConsistentSettings(uploadedMetadataAttribute) + .clusterStateCustomMetadataMap(Collections.emptyMap()) + .diffManifest( + new ClusterStateDiffManifest( + RemoteClusterStateServiceTests.generateClusterStateWithOneIndex().build(), + ClusterState.EMPTY_STATE, + routingTableIncrementalDiff, + uploadedMetadataAttribute.getUploadedFilename() ) ) .build(); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java index ec7e3c1ce81d3..b86f23f3d37aa 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java @@ -50,6 +50,7 @@ import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V3; import static org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import static org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.AsyncStaleFileDeletion; @@ -296,6 +297,74 @@ public void testDeleteClusterMetadata() throws IOException { verify(remoteRoutingTableService).deleteStaleIndexRoutingPaths(List.of(index3Metadata.getUploadedFilename())); } + public void testDeleteStaleIndicesRoutingDiffFile() throws IOException { + String clusterUUID = "clusterUUID"; + String clusterName = "test-cluster"; + List inactiveBlobs = Arrays.asList(new PlainBlobMetadata("manifest1.dat", 1L)); + List activeBlobs = Arrays.asList(new PlainBlobMetadata("manifest2.dat", 1L)); + + UploadedMetadataAttribute coordinationMetadata = new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination_metadata"); + UploadedMetadataAttribute templateMetadata = new UploadedMetadataAttribute(TEMPLATES_METADATA, "template_metadata"); + UploadedMetadataAttribute settingMetadata = new UploadedMetadataAttribute(SETTING_METADATA, "settings_metadata"); + UploadedMetadataAttribute coordinationMetadataUpdated = new UploadedMetadataAttribute( + COORDINATION_METADATA, + "coordination_metadata_updated" + ); + + UploadedIndexMetadata index1Metadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1__2"); + UploadedIndexMetadata index2Metadata = new UploadedIndexMetadata("index2", "indexUUID2", "index_metadata2__2"); + List indicesRouting1 = List.of(index1Metadata); + List indicesRouting2 = List.of(index2Metadata); + ClusterStateDiffManifest diffManifest1 = ClusterStateDiffManifest.builder().indicesRoutingDiffPath("index1RoutingDiffPath").build(); + ClusterStateDiffManifest diffManifest2 = 
ClusterStateDiffManifest.builder().indicesRoutingDiffPath("index2RoutingDiffPath").build(); + + ClusterMetadataManifest manifest1 = ClusterMetadataManifest.builder() + .indices(List.of(index1Metadata)) + .coordinationMetadata(coordinationMetadataUpdated) + .templatesMetadata(templateMetadata) + .settingMetadata(settingMetadata) + .clusterTerm(1L) + .stateVersion(1L) + .codecVersion(CODEC_V3) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .routingTableVersion(0L) + .indicesRouting(indicesRouting1) + .diffManifest(diffManifest1) + .build(); + ClusterMetadataManifest manifest2 = ClusterMetadataManifest.builder(manifest1) + .indices(List.of(index2Metadata)) + .indicesRouting(indicesRouting2) + .diffManifest(diffManifest2) + .build(); + + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateCleanupManager.start(); + when(remoteManifestManager.getManifestFolderPath(eq(clusterName), eq(clusterUUID))).thenReturn( + new BlobPath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID).add(MANIFEST) + ); + when(remoteManifestManager.fetchRemoteClusterMetadataManifest(eq(clusterName), eq(clusterUUID), any())).thenReturn( + manifest2, + manifest1 + ); + remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager( + remoteClusterStateService, + clusterService, + remoteRoutingTableService + ); + remoteClusterStateCleanupManager.start(); + remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); + verify(remoteRoutingTableService).deleteStaleIndexRoutingDiffPaths(List.of("index1RoutingDiffPath")); + } + public void testDeleteClusterMetadataNoOpsRoutingTableService() throws IOException { String clusterUUID = "clusterUUID"; String clusterName = "test-cluster"; @@ -515,6 +584,83 @@ public void testIndexRoutingFilesCleanupFailureStats() throws Exception { }); } + public void testIndicesRoutingDiffFilesCleanupFailureStats() throws Exception { + String clusterUUID = "clusterUUID"; + String clusterName = "test-cluster"; + List inactiveBlobs = Arrays.asList(new PlainBlobMetadata("manifest1.dat", 1L)); + List activeBlobs = Arrays.asList(new PlainBlobMetadata("manifest2.dat", 1L)); + + UploadedMetadataAttribute coordinationMetadata = new UploadedMetadataAttribute(COORDINATION_METADATA, "coordination_metadata"); + UploadedMetadataAttribute templateMetadata = new UploadedMetadataAttribute(TEMPLATES_METADATA, "template_metadata"); + UploadedMetadataAttribute settingMetadata = new UploadedMetadataAttribute(SETTING_METADATA, "settings_metadata"); + UploadedMetadataAttribute coordinationMetadataUpdated = new UploadedMetadataAttribute( + COORDINATION_METADATA, + "coordination_metadata_updated" + ); + + UploadedIndexMetadata index1Metadata = new UploadedIndexMetadata("index1", "indexUUID1", "index_metadata1__2"); + UploadedIndexMetadata index2Metadata = new UploadedIndexMetadata("index2", "indexUUID2", "index_metadata2__2"); + List indicesRouting1 = List.of(index1Metadata); + List indicesRouting2 = List.of(index2Metadata); + ClusterStateDiffManifest diffManifest1 = 
ClusterStateDiffManifest.builder().indicesRoutingDiffPath("index1RoutingDiffPath").build(); + ClusterStateDiffManifest diffManifest2 = ClusterStateDiffManifest.builder().indicesRoutingDiffPath("index2RoutingDiffPath").build(); + + ClusterMetadataManifest manifest1 = ClusterMetadataManifest.builder() + .indices(List.of(index1Metadata)) + .coordinationMetadata(coordinationMetadataUpdated) + .templatesMetadata(templateMetadata) + .settingMetadata(settingMetadata) + .clusterTerm(1L) + .stateVersion(1L) + .codecVersion(CODEC_V3) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .routingTableVersion(0L) + .indicesRouting(indicesRouting1) + .diffManifest(diffManifest1) + .build(); + ClusterMetadataManifest manifest2 = ClusterMetadataManifest.builder(manifest1) + .indices(List.of(index2Metadata)) + .indicesRouting(indicesRouting2) + .diffManifest(diffManifest2) + .build(); + + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateCleanupManager.start(); + when(remoteManifestManager.getManifestFolderPath(eq(clusterName), eq(clusterUUID))).thenReturn( + new BlobPath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID).add(MANIFEST) + ); + when(remoteManifestManager.fetchRemoteClusterMetadataManifest(eq(clusterName), eq(clusterUUID), any())).thenReturn( + manifest1, + manifest2 + ); + doNothing().when(remoteRoutingTableService).deleteStaleIndexRoutingDiffPaths(any()); + + remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); + assertBusy(() -> { + // wait for stats to get updated + assertNotNull(remoteClusterStateCleanupManager.getStats()); + assertEquals(0, remoteClusterStateCleanupManager.getStats().getIndicesRoutingDiffFileCleanupAttemptFailedCount()); + }); + + doThrow(IOException.class).when(remoteRoutingTableService).deleteStaleIndexRoutingPaths(any()); + remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); + assertBusy(() -> { + // wait for stats to get updated + assertNotNull(remoteClusterStateCleanupManager.getStats()); + assertEquals(1, remoteClusterStateCleanupManager.getStats().getIndicesRoutingDiffFileCleanupAttemptFailedCount()); + }); + } + public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Exception { BlobContainer blobContainer = mock(BlobContainer.class); when(blobStore.blobContainer(any())).thenReturn(blobContainer); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 6c764585c48e7..59ca62dff2aa7 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -535,14 +535,15 @@ public void testTimeoutWhileWritingManifestFile() throws IOException { anyBoolean(), anyMap(), anyBoolean(), - anyList() + anyList(), + anyMap() ) ).thenReturn(new RemoteClusterStateUtils.UploadedMetadataResults()); 
RemoteStateTransferException ex = expectThrows( RemoteStateTransferException.class, () -> spiedService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); - assertTrue(ex.getMessage().contains("Timed out waiting for transfer of manifest file to complete")); + assertTrue(ex.getMessage().contains("Timed out waiting for transfer of following metadata to complete")); } public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { @@ -634,7 +635,8 @@ public void testWriteMetadataInParallelIncompleteUpload() throws IOException { true, clusterState.getCustoms(), true, - emptyList() + emptyList(), + null ) ); assertTrue(exception.getMessage().startsWith("Some metadata components were not uploaded successfully")); @@ -684,7 +686,8 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { eq(false), eq(Collections.emptyMap()), eq(false), - eq(Collections.emptyList()) + eq(Collections.emptyList()), + eq(Collections.emptyMap()) ); assertThat(manifestInfo.getManifestFileName(), notNullValue()); @@ -764,7 +767,8 @@ public void testWriteIncrementalMetadataSuccessWhenPublicationEnabled() throws I eq(false), eq(Collections.emptyMap()), eq(true), - Mockito.anyList() + anyList(), + eq(Collections.emptyMap()) ); assertThat(manifestInfo.getManifestFileName(), notNullValue()); @@ -811,7 +815,8 @@ public void testTimeoutWhileWritingMetadata() throws IOException { true, emptyMap(), true, - emptyList() + emptyList(), + null ) ); assertTrue(exception.getMessage().startsWith("Timed out waiting for transfer of following metadata to complete")); @@ -862,6 +867,7 @@ public void testGetClusterStateForManifest_IncludeEphemeral() throws IOException eq(manifest.getIndicesRouting()), eq(true), eq(manifest.getClusterStateCustomMap()), + eq(false), eq(true) ); } @@ -911,7 +917,9 @@ public void testGetClusterStateForManifest_ExcludeEphemeral() throws IOException eq(emptyList()), eq(false), eq(emptyMap()), + eq(false), eq(false) + ); } @@ -958,6 +966,7 @@ public void testGetClusterStateFromManifest_CodecV1() throws IOException { eq(emptyList()), eq(false), eq(emptyMap()), + eq(false), eq(false) ); verify(mockedGlobalMetadataManager, times(1)).getGlobalMetadata(eq(manifest.getClusterUUID()), eq(manifest)); @@ -1281,6 +1290,7 @@ public void testReadClusterStateInParallel_TimedOut() throws IOException { emptyList(), true, emptyMap(), + false, true ) ); @@ -1312,6 +1322,7 @@ public void testReadClusterStateInParallel_ExceptionDuringRead() throws IOExcept emptyList(), true, emptyMap(), + false, true ) ); @@ -1418,6 +1429,7 @@ public void testReadClusterStateInParallel_UnexpectedResult() throws IOException emptyList(), true, newClusterStateCustoms, + false, true ) ); @@ -1652,6 +1664,7 @@ public void testReadClusterStateInParallel_Success() throws IOException { emptyList(), true, newClusterStateCustoms, + false, true ); @@ -2745,6 +2758,108 @@ public void testWriteIncrementalMetadataSuccessWithRoutingTable() throws IOExcep assertThat(manifest.getIndicesRouting().get(0).getUploadedFilename(), notNullValue()); } + public void testWriteIncrementalMetadataSuccessWithRoutingTableDiff() throws IOException { + initializeRoutingTable(); + final ClusterState clusterState = generateClusterStateWithOneIndex("test-index", 5, 1, false).nodes( + nodesWithLocalNodeClusterManager() + ).build(); + mockBlobStoreObjects(); + List indices = new ArrayList<>(); + final UploadedIndexMetadata uploadedIndiceRoutingMetadata = new UploadedIndexMetadata( + "test-index", + "index-uuid", + "routing-filename", + 
INDEX_ROUTING_METADATA_PREFIX + ); + indices.add(uploadedIndiceRoutingMetadata); + final ClusterState previousClusterState = generateClusterStateWithOneIndex("test-index", 5, 1, true).nodes( + nodesWithLocalNodeClusterManager() + ).build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build(); + when((blobStoreRepository.basePath())).thenReturn(BlobPath.cleanPath().add("base-path")); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ).getClusterMetadataManifest(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of(uploadedIndexMetadata)) + .clusterTerm(clusterState.term()) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .routingTableVersion(1) + .indicesRouting(List.of(uploadedIndiceRoutingMetadata)) + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getRoutingTableVersion(), is(expectedManifest.getRoutingTableVersion())); + assertThat(manifest.getIndicesRouting().get(0).getIndexName(), is(uploadedIndiceRoutingMetadata.getIndexName())); + assertThat(manifest.getIndicesRouting().get(0).getIndexUUID(), is(uploadedIndiceRoutingMetadata.getIndexUUID())); + assertThat(manifest.getIndicesRouting().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getDiffManifest().getIndicesRoutingDiffPath(), notNullValue()); + } + + public void testWriteIncrementalMetadataSuccessWithRoutingTableDiffNull() throws IOException { + initializeRoutingTable(); + final ClusterState clusterState = generateClusterStateWithOneIndex("test-index", 5, 1, false).nodes( + nodesWithLocalNodeClusterManager() + ).build(); + mockBlobStoreObjects(); + List indices = new ArrayList<>(); + final UploadedIndexMetadata uploadedIndiceRoutingMetadata = new UploadedIndexMetadata( + "test-index", + "index-uuid", + "routing-filename", + INDEX_ROUTING_METADATA_PREFIX + ); + indices.add(uploadedIndiceRoutingMetadata); + final ClusterState previousClusterState = generateClusterStateWithOneIndex("test-index2", 5, 1, false).nodes( + nodesWithLocalNodeClusterManager() + ).build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build(); + when((blobStoreRepository.basePath())).thenReturn(BlobPath.cleanPath().add("base-path")); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ).getClusterMetadataManifest(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of(uploadedIndexMetadata)) + .clusterTerm(clusterState.term()) + .stateVersion(1L) + 
.stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .routingTableVersion(1) + .indicesRouting(List.of(uploadedIndiceRoutingMetadata)) + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getRoutingTableVersion(), is(expectedManifest.getRoutingTableVersion())); + assertThat(manifest.getIndicesRouting().get(0).getIndexName(), is(uploadedIndiceRoutingMetadata.getIndexName())); + assertThat(manifest.getIndicesRouting().get(0).getIndexUUID(), is(uploadedIndiceRoutingMetadata.getIndexUUID())); + assertThat(manifest.getIndicesRouting().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getDiffManifest().getIndicesRoutingDiffPath(), nullValue()); + } + private void initializeRoutingTable() { Settings newSettings = Settings.builder() .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") @@ -3217,6 +3332,54 @@ static ClusterState.Builder generateClusterStateWithOneIndex() { .routingTable(RoutingTable.builder().addAsNew(indexMetadata).version(1L).build()); } + public static ClusterState.Builder generateClusterStateWithOneIndex( + String indexName, + int primaryShards, + int replicaShards, + boolean addAsNew + ) { + + final Index index = new Index(indexName, "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(primaryShards) + .numberOfReplicas(replicaShards) + .build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final Settings settings = Settings.builder().put("mock-settings", true).build(); + final TemplatesMetadata templatesMetadata = TemplatesMetadata.builder() + .put(IndexTemplateMetadata.builder("template1").settings(idxSettings).patterns(List.of("test*")).build()) + .build(); + final CustomMetadata1 customMetadata1 = new CustomMetadata1("custom-metadata-1"); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + if (addAsNew) { + routingTableBuilder.addAsNew(indexMetadata); + } else { + routingTableBuilder.addAsRecovery(indexMetadata); + } + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .version(randomNonNegativeLong()) + .put(indexMetadata, true) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .persistentSettings(settings) + .templates(templatesMetadata) + .hashesOfConsistentSettings(Map.of("key1", "value1", "key2", "value2")) + .putCustom(customMetadata1.getWriteableName(), customMetadata1) + .build() + ) + .routingTable(routingTableBuilder.version(1L).build()); + } + static ClusterState.Builder generateClusterStateWithAllAttributes() { final Index index = new Index("test-index", "index-uuid"); final Settings idxSettings = Settings.builder() @@ -3296,7 +3459,7 @@ static ClusterMetadataManifest.Builder generateClusterMetadataManifestWithAllAtt ); } - static DiscoveryNodes 
nodesWithLocalNodeClusterManager() { + public static DiscoveryNodes nodesWithLocalNodeClusterManager() { final DiscoveryNode localNode = new DiscoveryNode("cluster-manager-id", buildNewFakeTransportAddress(), Version.CURRENT); return DiscoveryNodes.builder().clusterManagerNodeId("cluster-manager-id").localNodeId("cluster-manager-id").add(localNode).build(); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/ClusterStateDiffManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/ClusterStateDiffManifestTests.java index 897b2f5eeb25d..f89619a09cd52 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/ClusterStateDiffManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/ClusterStateDiffManifestTests.java @@ -10,6 +10,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; @@ -17,6 +18,7 @@ import org.opensearch.cluster.metadata.TemplatesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; @@ -40,7 +42,11 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.Version.CURRENT; import static org.opensearch.cluster.ClusterState.EMPTY_STATE; +import static org.opensearch.cluster.routing.remote.RemoteRoutingTableService.CUSTOM_ROUTING_TABLE_DIFFABLE_VALUE_SERIALIZER; import static org.opensearch.core.common.transport.TransportAddress.META_ADDRESS; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V3; +import static org.opensearch.gateway.remote.RemoteClusterStateServiceTests.generateClusterStateWithOneIndex; +import static org.opensearch.gateway.remote.RemoteClusterStateServiceTests.nodesWithLocalNodeClusterManager; import static org.opensearch.gateway.remote.model.RemoteClusterBlocksTests.randomClusterBlocks; public class ClusterStateDiffManifestTests extends OpenSearchTestCase { @@ -114,11 +120,70 @@ public void testClusterStateDiffManifestXContent() throws IOException { diffManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { - final ClusterStateDiffManifest parsedManifest = ClusterStateDiffManifest.fromXContent(parser); + final ClusterStateDiffManifest parsedManifest = ClusterStateDiffManifest.fromXContent(parser, CODEC_V3); assertEquals(diffManifest, parsedManifest); } } + public void testClusterStateWithRoutingTableDiffInDiffManifestXContent() throws IOException { + ClusterState initialState = generateClusterStateWithOneIndex("test-index", 5, 1, true).nodes(nodesWithLocalNodeClusterManager()) + .build(); + + ClusterState updatedState = generateClusterStateWithOneIndex("test-index", 5, 2, false).nodes(nodesWithLocalNodeClusterManager()) + .build(); + + ClusterStateDiffManifest diffManifest = verifyRoutingTableDiffManifest(initialState, updatedState); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + diffManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + 
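// Round-trip check: render the diff manifest to JSON and parse it back with CODEC_V3 to
+ // verify that the routing table diff fields survive XContent serialization.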
+ builder.endObject();
+ try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
+ final ClusterStateDiffManifest parsedManifest = ClusterStateDiffManifest.fromXContent(parser, CODEC_V3);
+ assertEquals(diffManifest, parsedManifest);
+ }
+ }
+
+ public void testClusterStateWithRoutingTableDiffInDiffManifestXContent1() throws IOException {
+ ClusterState initialState = generateClusterStateWithOneIndex("test-index", 5, 1, true).nodes(nodesWithLocalNodeClusterManager())
+ .build();
+
+ ClusterState updatedState = generateClusterStateWithOneIndex("test-index-1", 5, 2, false).nodes(nodesWithLocalNodeClusterManager())
+ .build();
+
+ ClusterStateDiffManifest diffManifest = verifyRoutingTableDiffManifest(initialState, updatedState);
+ final XContentBuilder builder = JsonXContent.contentBuilder();
+ builder.startObject();
+ diffManifest.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
+ final ClusterStateDiffManifest parsedManifest = ClusterStateDiffManifest.fromXContent(parser, CODEC_V3);
+ assertEquals(diffManifest, parsedManifest);
+ }
+ }
+
+ private ClusterStateDiffManifest verifyRoutingTableDiffManifest(ClusterState previousState, ClusterState currentState) {
+ // Create initial and updated IndexRoutingTable maps
+ Map<String, IndexRoutingTable> initialRoutingTableMap = previousState.getRoutingTable().indicesRouting();
+ Map<String, IndexRoutingTable> updatedRoutingTableMap = currentState.getRoutingTable().indicesRouting();
+
+ DiffableUtils.MapDiff<String, IndexRoutingTable, Map<String, IndexRoutingTable>> routingTableIncrementalDiff = DiffableUtils.diff(
+ initialRoutingTableMap,
+ updatedRoutingTableMap,
+ DiffableUtils.getStringKeySerializer(),
+ CUSTOM_ROUTING_TABLE_DIFFABLE_VALUE_SERIALIZER
+ );
+ ClusterStateDiffManifest manifest = new ClusterStateDiffManifest(
+ currentState,
+ previousState,
+ routingTableIncrementalDiff,
+ "indicesRoutingDiffPath"
+ );
+ assertEquals("indicesRoutingDiffPath", manifest.getIndicesRoutingDiffPath());
+ assertEquals(routingTableIncrementalDiff.getUpserts().size(), manifest.getIndicesRoutingUpdated().size());
+ assertEquals(routingTableIncrementalDiff.getDeletes().size(), manifest.getIndicesRoutingDeleted().size());
+ return manifest;
+ }
+
 private ClusterStateDiffManifest updateAndVerifyState(
 ClusterState initialState,
 List<IndexMetadata> indicesToAdd,
@@ -191,7 +256,7 @@ private ClusterStateDiffManifest updateAndVerifyState(
 }
 ClusterState updatedClusterState = clusterStateBuilder.metadata(metadataBuilder.build()).build();
- ClusterStateDiffManifest manifest = new ClusterStateDiffManifest(updatedClusterState, initialState);
+ ClusterStateDiffManifest manifest = new ClusterStateDiffManifest(updatedClusterState, initialState, null, null);
 assertEquals(indicesToAdd.stream().map(im -> im.getIndex().getName()).collect(toList()), manifest.getIndicesUpdated());
 assertEquals(indicesToRemove, manifest.getIndicesDeleted());
 assertEquals(new ArrayList<>(customsToAdd.keySet()), manifest.getCustomMetadataUpdated());
diff --git a/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableDiffTests.java b/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableDiffTests.java
new file mode 100644
index 0000000000000..6ffa7fc5cded8
--- /dev/null
+++ b/server/src/test/java/org/opensearch/gateway/remote/routingtable/RemoteIndexRoutingTableDiffTests.java
@@ -0,0 +1,317 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.routingtable; + +import org.opensearch.Version; +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.RoutingTableIncrementalDiff; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_FILE; +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_METADATA_PREFIX; +import static org.opensearch.gateway.remote.routingtable.RemoteRoutingTableDiff.ROUTING_TABLE_DIFF_PATH_TOKEN; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteIndexRoutingTableDiffTests extends OpenSearchTestCase { + + private static final String TEST_BLOB_NAME = "/test-path/test-blob-name"; + private static final String TEST_BLOB_PATH = "test-path"; + private static final String TEST_BLOB_FILE_NAME = "test-blob-name"; + private static final long STATE_VERSION = 3L; + private static final long STATE_TERM = 2L; + private String clusterUUID; + private BlobStoreRepository blobStoreRepository; + private BlobStoreTransferService blobStoreTransferService; + private ClusterSettings clusterSettings; + private Compressor compressor; + + private String clusterName; + private NamedWriteableRegistry namedWriteableRegistry; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + this.clusterUUID = "test-cluster-uuid"; + this.blobStoreTransferService = mock(BlobStoreTransferService.class); + this.blobStoreRepository = mock(BlobStoreRepository.class); + BlobPath blobPath = new BlobPath().add("/path"); + when(blobStoreRepository.basePath()).thenReturn(blobPath); + when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + compressor = new NoneCompressor(); + namedWriteableRegistry = writableRegistry(); + this.clusterName = 
"test-cluster-name"; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testClusterUUID() { + Map> diffs = new HashMap<>(); + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()).initializeAsNew(indexMetadata).build(); + + diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable)); + + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs); + + RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertEquals(remoteDiffForUpload.clusterUUID(), clusterUUID); + + RemoteRoutingTableDiff remoteDiffForDownload = new RemoteRoutingTableDiff(TEST_BLOB_NAME, clusterUUID, compressor); + assertEquals(remoteDiffForDownload.clusterUUID(), clusterUUID); + } + + public void testFullBlobName() { + Map> diffs = new HashMap<>(); + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()).initializeAsNew(indexMetadata).build(); + + diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable)); + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs); + + RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertThat(remoteDiffForUpload.getFullBlobName(), nullValue()); + + RemoteRoutingTableDiff remoteDiffForDownload = new RemoteRoutingTableDiff(TEST_BLOB_NAME, clusterUUID, compressor); + assertThat(remoteDiffForDownload.getFullBlobName(), is(TEST_BLOB_NAME)); + } + + public void testBlobFileName() { + Map> diffs = new HashMap<>(); + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()).initializeAsNew(indexMetadata).build(); + + diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable)); + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs); + + RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertThat(remoteDiffForUpload.getBlobFileName(), nullValue()); + + RemoteRoutingTableDiff remoteDiffForDownload = new RemoteRoutingTableDiff(TEST_BLOB_NAME, clusterUUID, compressor); + 
assertThat(remoteDiffForDownload.getBlobFileName(), is(TEST_BLOB_FILE_NAME)); + } + + public void testBlobPathParameters() { + Map> diffs = new HashMap<>(); + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()).initializeAsNew(indexMetadata).build(); + + diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable)); + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs); + + RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + assertThat(remoteDiffForUpload.getBlobFileName(), nullValue()); + + BlobPathParameters params = remoteDiffForUpload.getBlobPathParameters(); + assertThat(params.getPathTokens(), is(List.of(ROUTING_TABLE_DIFF_PATH_TOKEN))); + String expectedPrefix = ROUTING_TABLE_DIFF_METADATA_PREFIX; + assertThat(params.getFilePrefix(), is(expectedPrefix)); + } + + public void testGenerateBlobFileName() { + Map> diffs = new HashMap<>(); + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()).initializeAsNew(indexMetadata).build(); + + diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable)); + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs); + + RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + clusterUUID, + compressor, + STATE_TERM, + STATE_VERSION + ); + + String blobFileName = remoteDiffForUpload.generateBlobFileName(); + String[] nameTokens = blobFileName.split("__"); + assertEquals(ROUTING_TABLE_DIFF_METADATA_PREFIX, nameTokens[0]); + assertEquals(RemoteStoreUtils.invertLong(STATE_TERM), nameTokens[1]); + assertEquals(RemoteStoreUtils.invertLong(STATE_VERSION), nameTokens[2]); + assertThat(RemoteStoreUtils.invertLong(nameTokens[3]), lessThanOrEqualTo(System.currentTimeMillis())); + } + + public void testGetUploadedMetadata() throws IOException { + Map> diffs = new HashMap<>(); + String indexName = randomAlphaOfLength(randomIntBetween(1, 50)); + int numberOfShards = randomIntBetween(1, 10); + int numberOfReplicas = randomIntBetween(1, 10); + + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(numberOfReplicas) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()).initializeAsNew(indexMetadata).build(); + + diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable)); + RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs); + + RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff( + routingTableIncrementalDiff, + 
clusterUUID,
+ compressor,
+ STATE_TERM,
+ STATE_VERSION
+ );
+
+ remoteDiffForUpload.setFullBlobName(new BlobPath().add(TEST_BLOB_PATH));
+ ClusterMetadataManifest.UploadedMetadata uploadedMetadataAttribute = remoteDiffForUpload.getUploadedMetadata();
+ assertEquals(ROUTING_TABLE_DIFF_FILE, uploadedMetadataAttribute.getComponent());
+ }
+
+ public void testStreamOperations() throws IOException {
+ String indexName = randomAlphaOfLength(randomIntBetween(1, 50));
+ int numberOfShards = randomIntBetween(1, 10);
+ int numberOfReplicas = randomIntBetween(1, 10);
+
+ Metadata metadata = Metadata.builder()
+ .put(
+ IndexMetadata.builder(indexName)
+ .settings(settings(Version.CURRENT))
+ .numberOfShards(numberOfShards)
+ .numberOfReplicas(numberOfReplicas)
+ )
+ .build();
+
+ RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index(indexName)).build();
+ Map<String, Diff<IndexRoutingTable>> diffs = new HashMap<>();
+
+ initialRoutingTable.getIndicesRouting().values().forEach(indexRoutingTable -> {
+ diffs.put(indexName, indexRoutingTable.diff(indexRoutingTable));
+ RoutingTableIncrementalDiff routingTableIncrementalDiff = new RoutingTableIncrementalDiff(diffs);
+
+ RemoteRoutingTableDiff remoteDiffForUpload = new RemoteRoutingTableDiff(
+ routingTableIncrementalDiff,
+ clusterUUID,
+ compressor,
+ STATE_TERM,
+ STATE_VERSION
+ );
+
+ assertThrows(AssertionError.class, remoteDiffForUpload::getUploadedMetadata);
+
+ try (InputStream inputStream = remoteDiffForUpload.serialize()) {
+ remoteDiffForUpload.setFullBlobName(BlobPath.cleanPath());
+ assertThat(inputStream.available(), greaterThan(0));
+
+ routingTableIncrementalDiff = remoteDiffForUpload.deserialize(inputStream);
+ assertEquals(remoteDiffForUpload.getDiffs().size(), routingTableIncrementalDiff.getDiffs().size());
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+}

From 5026af61b2b8cd5695c3945508a5fae2f4267de8 Mon Sep 17 00:00:00 2001
From: Pranshu Shukla <55992439+Pranshu-S@users.noreply.github.com>
Date: Tue, 23 Jul 2024 20:24:00 +0530
Subject: [PATCH 04/68] Optimized ClusterStatsIndices to precompute shard stats (#14426)

* Optimize Cluster Stats Indices to precompute node level stats

Signed-off-by: Pranshu Shukla
---
 CHANGELOG.md | 1 +
 .../admin/cluster/stats/ClusterStatsIT.java | 119 ++++--
 .../cluster/stats/ClusterStatsIndices.java | 67 +++--
 .../stats/ClusterStatsNodeResponse.java | 133 ++++++++-
 .../cluster/stats/ClusterStatsRequest.java | 17 ++
 .../stats/ClusterStatsRequestBuilder.java | 5 +
 .../stats/TransportClusterStatsAction.java | 10 +-
 .../admin/cluster/RestClusterStatsAction.java | 1 +
 .../cluster/stats/ClusterStatsNodesTests.java | 269 ++++++++++++++++++
 9 files changed, 584 insertions(+), 38 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c8f185ca2bb3d..6aa3d7a58dda4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add persian_stem filter (([#14847](https://github.com/opensearch-project/OpenSearch/pull/14847)))
 - Create listener to refresh search thread resource usage ([#14832](https://github.com/opensearch-project/OpenSearch/pull/14832))
 - Add rest, transport layer changes for hot to warm tiering - dedicated setup (([#13980](https://github.com/opensearch-project/OpenSearch/pull/13980))
+- Optimize Cluster Stats Indices to precompute node level stats ([#14426](https://github.com/opensearch-project/OpenSearch/pull/14426))

 ### Dependencies
 - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9
([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442)) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index 085a32593063a..f23cdbb50b37a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -88,7 +88,11 @@ public void testNodeCounts() { Map expectedCounts = getExpectedCounts(1, 1, 1, 1, 1, 0, 0); int numNodes = randomIntBetween(1, 5); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(response.getNodesStats().getCounts(), total, expectedCounts); for (int i = 0; i < numNodes; i++) { @@ -153,7 +157,11 @@ public void testNodeCountsWithDeprecatedMasterRole() throws ExecutionException, Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 0, 0, 0); Client client = client(); - ClusterStatsResponse response = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(response.getNodesStats().getCounts(), total, expectedCounts); Set expectedRoles = Set.of(DiscoveryNodeRole.MASTER_ROLE.roleName()); @@ -176,15 +184,60 @@ private void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor)); } - public void testIndicesShardStats() throws ExecutionException, InterruptedException { + public void testIndicesShardStatsWithoutNodeLevelAggregations() { + internalCluster().startNode(); + ensureGreen(); + ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); + + prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); + + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); + assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0L)); + assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1)); + assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0); + + // add another node, replicas should get assigned + internalCluster().startNode(); + ensureGreen(); + index("test1", "type", "1", "f", "f"); + refresh(); // make the doc visible + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); + assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); + assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); + + prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); + ensureGreen(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(false).get(); + assertThat(response.getStatus(), 
Matchers.equalTo(ClusterHealthStatus.GREEN)); + assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2)); + assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5); + + assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5)); + assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2)); + assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3)); + + assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5)); + assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3)); + assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4)); + + assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5)); + assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0)); + assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0)); + + } + + public void testIndicesShardStatsWithNodeLevelAggregations() { internalCluster().startNode(); ensureGreen(); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0L)); assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1)); @@ -195,14 +248,14 @@ public void testIndicesShardStats() throws ExecutionException, InterruptedExcept ensureGreen(); index("test1", "type", "1", "f", "f"); refresh(); // make the doc visible - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); ensureGreen(); - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2)); assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5); @@ -225,7 +278,11 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte internalCluster().startNodes(randomIntBetween(1, 3)); index("test1", "type", "1", "f", "f"); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + 
.useAggregatedNodeLevelResponses(randomBoolean()) + .get(); String msg = response.toString(); assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000 assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L)); @@ -265,13 +322,21 @@ public void testAllocatedProcessors() throws Exception { internalCluster().startNode(Settings.builder().put(OpenSearchExecutors.NODE_PROCESSORS_SETTING.getKey(), 7).build()); waitForNodes(1); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertThat(response.getNodesStats().getOs().getAllocatedProcessors(), equalTo(7)); } public void testClusterStatusWhenStateNotRecovered() throws Exception { internalCluster().startClusterManagerOnlyNode(Settings.builder().put("gateway.recover_after_nodes", 2).build()); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED)); if (randomBoolean()) { @@ -281,14 +346,18 @@ public void testClusterStatusWhenStateNotRecovered() throws Exception { } // wait for the cluster status to settle ensureGreen(); - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(randomBoolean()).get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } public void testFieldTypes() { internalCluster().startNode(); ensureGreen(); - ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertTrue(response.getIndicesStats().getMappings().getFieldTypeStats().isEmpty()); @@ -301,7 +370,7 @@ public void testFieldTypes() { + "\"eggplant\":{\"type\":\"integer\"}}}}}" ) .get(); - response = client().admin().cluster().prepareClusterStats().get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(randomBoolean()).get(); assertThat(response.getIndicesStats().getMappings().getFieldTypeStats().size(), equalTo(3)); Set stats = response.getIndicesStats().getMappings().getFieldTypeStats(); for (IndexFeatureStats stat : stats) { @@ -329,7 +398,11 @@ public void testNodeRolesWithMasterLegacySettings() throws ExecutionException, I Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedCounts); Set expectedRoles = Set.of( @@ -359,7 +432,11 @@ public void testNodeRolesWithClusterManagerRole() throws ExecutionException, Int Map expectedCounts = getExpectedCounts(0, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = 
client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedCounts); Set expectedRoles = Set.of( @@ -383,7 +460,11 @@ public void testNodeRolesWithSeedDataNodeLegacySettings() throws ExecutionExcept Map expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedRoleCounts); Set expectedRoles = Set.of( @@ -410,7 +491,11 @@ public void testNodeRolesWithDataNodeLegacySettings() throws ExecutionException, Map expectedRoleCounts = getExpectedCounts(1, 1, 1, 0, 1, 0, 0); Client client = client(); - ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(); + ClusterStatsResponse clusterStatsResponse = client.admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .get(); assertCounts(clusterStatsResponse.getNodesStats().getCounts(), total, expectedRoleCounts); Set> expectedNodesRoles = Set.of( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java index 26e554f44fca1..03a73f45ffe81 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -78,26 +78,49 @@ public ClusterStatsIndices(List nodeResponses, Mapping this.segments = new SegmentsStats(); for (ClusterStatsNodeResponse r : nodeResponses) { - for (org.opensearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { - ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndexName()); - if (indexShardStats == null) { - indexShardStats = new ShardStats(); - countsPerIndex.put(shardStats.getShardRouting().getIndexName(), indexShardStats); + // Aggregated response from the node + if (r.getAggregatedNodeLevelStats() != null) { + + for (Map.Entry entry : r.getAggregatedNodeLevelStats().indexStatsMap + .entrySet()) { + ShardStats indexShardStats = countsPerIndex.get(entry.getKey()); + if (indexShardStats == null) { + indexShardStats = new ShardStats(entry.getValue()); + countsPerIndex.put(entry.getKey(), indexShardStats); + } else { + indexShardStats.addStatsFrom(entry.getValue()); + } } - indexShardStats.total++; - - CommonStats shardCommonStats = shardStats.getStats(); - - if (shardStats.getShardRouting().primary()) { - indexShardStats.primaries++; - docs.add(shardCommonStats.docs); + docs.add(r.getAggregatedNodeLevelStats().commonStats.docs); + store.add(r.getAggregatedNodeLevelStats().commonStats.store); + fieldData.add(r.getAggregatedNodeLevelStats().commonStats.fieldData); + queryCache.add(r.getAggregatedNodeLevelStats().commonStats.queryCache); + completion.add(r.getAggregatedNodeLevelStats().commonStats.completion); + segments.add(r.getAggregatedNodeLevelStats().commonStats.segments); + } else { + // Default response from the node + for 
(org.opensearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { + ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndexName()); + if (indexShardStats == null) { + indexShardStats = new ShardStats(); + countsPerIndex.put(shardStats.getShardRouting().getIndexName(), indexShardStats); + } + + indexShardStats.total++; + + CommonStats shardCommonStats = shardStats.getStats(); + + if (shardStats.getShardRouting().primary()) { + indexShardStats.primaries++; + docs.add(shardCommonStats.docs); + } + store.add(shardCommonStats.store); + fieldData.add(shardCommonStats.fieldData); + queryCache.add(shardCommonStats.queryCache); + completion.add(shardCommonStats.completion); + segments.add(shardCommonStats.segments); } - store.add(shardCommonStats.store); - fieldData.add(shardCommonStats.fieldData); - queryCache.add(shardCommonStats.queryCache); - completion.add(shardCommonStats.completion); - segments.add(shardCommonStats.segments); } } @@ -202,6 +225,11 @@ public static class ShardStats implements ToXContentFragment { public ShardStats() {} + public ShardStats(ClusterStatsNodeResponse.AggregatedIndexStats aggregatedIndexStats) { + this.total = aggregatedIndexStats.total; + this.primaries = aggregatedIndexStats.primaries; + } + /** * number of indices in the cluster */ @@ -329,6 +357,11 @@ public void addIndexShardCount(ShardStats indexShardCount) { } } + public void addStatsFrom(ClusterStatsNodeResponse.AggregatedIndexStats incomingStats) { + this.total += incomingStats.total; + this.primaries += incomingStats.primaries; + } + /** * Inner Fields used for creating XContent and parsing * diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 1b25bf84356d6..133cf68f5f8c9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -32,17 +32,29 @@ package org.opensearch.action.admin.cluster.stats; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.fielddata.FieldDataStats; +import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.store.StoreStats; +import org.opensearch.search.suggest.completion.CompletionStats; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; /** * Transport action for obtaining cluster stats from node level @@ -55,6 +67,7 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeStats nodeStats; private final ShardStats[] shardsStats; private ClusterHealthStatus clusterStatus; + 
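// Set only when the request opts into aggregated node-level responses; writeTo then sends this
+ // roll-up instead of the full per-shard stats array on 3.0.0+ streams.
+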
private AggregatedNodeLevelStats aggregatedNodeLevelStats; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); @@ -64,7 +77,12 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.shardsStats = in.readOptionalArray(ShardStats::new, ShardStats[]::new); + this.aggregatedNodeLevelStats = in.readOptionalWriteable(AggregatedNodeLevelStats::new); + } else { + this.shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + } } public ClusterStatsNodeResponse( @@ -81,6 +99,24 @@ public ClusterStatsNodeResponse( this.clusterStatus = clusterStatus; } + public ClusterStatsNodeResponse( + DiscoveryNode node, + @Nullable ClusterHealthStatus clusterStatus, + NodeInfo nodeInfo, + NodeStats nodeStats, + ShardStats[] shardsStats, + boolean useAggregatedNodeLevelResponses + ) { + super(node); + this.nodeInfo = nodeInfo; + this.nodeStats = nodeStats; + if (useAggregatedNodeLevelResponses) { + this.aggregatedNodeLevelStats = new AggregatedNodeLevelStats(node, shardsStats); + } + this.shardsStats = shardsStats; + this.clusterStatus = clusterStatus; + } + public NodeInfo nodeInfo() { return this.nodeInfo; } @@ -101,6 +137,10 @@ public ShardStats[] shardsStats() { return this.shardsStats; } + public AggregatedNodeLevelStats getAggregatedNodeLevelStats() { + return aggregatedNodeLevelStats; + } + public static ClusterStatsNodeResponse readNodeResponse(StreamInput in) throws IOException { return new ClusterStatsNodeResponse(in); } @@ -116,6 +156,95 @@ public void writeTo(StreamOutput out) throws IOException { } nodeInfo.writeTo(out); nodeStats.writeTo(out); - out.writeArray(shardsStats); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (aggregatedNodeLevelStats != null) { + out.writeOptionalArray(null); + out.writeOptionalWriteable(aggregatedNodeLevelStats); + } else { + out.writeOptionalArray(shardsStats); + out.writeOptionalWriteable(null); + } + } else { + out.writeArray(shardsStats); + } + } + + /** + * Node level statistics used for ClusterStatsIndices for _cluster/stats call. 
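+ * Rolls a node's per-shard stats up into a single {@link CommonStats} plus a per-index map of shard
+ * counts, so the coordinator receives data proportional to the node's index count rather than its shard count.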
+ */
+ public class AggregatedNodeLevelStats extends BaseNodeResponse {
+
+ CommonStats commonStats;
+ Map<String, AggregatedIndexStats> indexStatsMap;
+
+ protected AggregatedNodeLevelStats(StreamInput in) throws IOException {
+ super(in);
+ commonStats = in.readOptionalWriteable(CommonStats::new);
+ indexStatsMap = in.readMap(StreamInput::readString, AggregatedIndexStats::new);
+ }
+
+ protected AggregatedNodeLevelStats(DiscoveryNode node, ShardStats[] indexShardsStats) {
+ super(node);
+ this.commonStats = new CommonStats();
+ this.commonStats.docs = new DocsStats();
+ this.commonStats.store = new StoreStats();
+ this.commonStats.fieldData = new FieldDataStats();
+ this.commonStats.queryCache = new QueryCacheStats();
+ this.commonStats.completion = new CompletionStats();
+ this.commonStats.segments = new SegmentsStats();
+ this.indexStatsMap = new HashMap<>();
+
+ // Index Level Stats
+ for (org.opensearch.action.admin.indices.stats.ShardStats shardStats : indexShardsStats) {
+ AggregatedIndexStats indexShardStats = this.indexStatsMap.get(shardStats.getShardRouting().getIndexName());
+ if (indexShardStats == null) {
+ indexShardStats = new AggregatedIndexStats();
+ this.indexStatsMap.put(shardStats.getShardRouting().getIndexName(), indexShardStats);
+ }
+
+ indexShardStats.total++;
+
+ CommonStats shardCommonStats = shardStats.getStats();
+
+ if (shardStats.getShardRouting().primary()) {
+ indexShardStats.primaries++;
+ this.commonStats.docs.add(shardCommonStats.docs);
+ }
+ this.commonStats.store.add(shardCommonStats.store);
+ this.commonStats.fieldData.add(shardCommonStats.fieldData);
+ this.commonStats.queryCache.add(shardCommonStats.queryCache);
+ this.commonStats.completion.add(shardCommonStats.completion);
+ this.commonStats.segments.add(shardCommonStats.segments);
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalWriteable(commonStats);
+ out.writeMap(indexStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream));
+ }
+ }
+
+ /**
+ * Index level shard count statistics (total and primary) used for ClusterStatsIndices for _cluster/stats call.
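+ * Counts are accumulated per index on each data node and merged back on the coordinator via
+ * ClusterStatsIndices.ShardStats#addStatsFrom.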
+ */ + @PublicApi(since = "2.16.0") + public static class AggregatedIndexStats implements Writeable { + public int total = 0; + public int primaries = 0; + + public AggregatedIndexStats(StreamInput in) throws IOException { + total = in.readVInt(); + primaries = in.readVInt(); + } + + public AggregatedIndexStats() {} + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(total); + out.writeVInt(primaries); + } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java index 6a99451c596ed..fdeb82a3466f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.stats; +import org.opensearch.Version; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,13 @@ public class ClusterStatsRequest extends BaseNodesRequest { public ClusterStatsRequest(StreamInput in) throws IOException { super(in); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + useAggregatedNodeLevelResponses = in.readOptionalBoolean(); + } } + private Boolean useAggregatedNodeLevelResponses = false; + /** * Get stats from nodes based on the nodes ids specified. If none are passed, stats * based on all nodes will be returned. @@ -59,9 +65,20 @@ public ClusterStatsRequest(String... nodesIds) { super(nodesIds); } + public boolean useAggregatedNodeLevelResponses() { + return useAggregatedNodeLevelResponses; + } + + public void useAggregatedNodeLevelResponses(boolean useAggregatedNodeLevelResponses) { + this.useAggregatedNodeLevelResponses = useAggregatedNodeLevelResponses; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalBoolean(useAggregatedNodeLevelResponses); + } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java index 0dcb03dc26d0e..4d0932bd3927d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java @@ -50,4 +50,9 @@ public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder< public ClusterStatsRequestBuilder(OpenSearchClient client, ClusterStatsAction action) { super(client, action, new ClusterStatsRequest()); } + + public final ClusterStatsRequestBuilder useAggregatedNodeLevelResponses(boolean useAggregatedNodeLevelResponses) { + request.useAggregatedNodeLevelResponses(useAggregatedNodeLevelResponses); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c7d03596a2a36..be7d41a7ba75e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -212,8 +212,14 @@ 
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); } - return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[0])); - + return new ClusterStatsNodeResponse( + nodeInfo.getNode(), + clusterStatus, + nodeInfo, + nodeStats, + shardsStats.toArray(new ShardStats[0]), + nodeRequest.request.useAggregatedNodeLevelResponses() + ); } /** diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java index 913db3c81e951..d4426a004af8e 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java @@ -67,6 +67,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); clusterStatsRequest.timeout(request.param("timeout")); clusterStatsRequest.setIncludeDiscoveryNodes(false); + clusterStatsRequest.useAggregatedNodeLevelResponses(true); return channel -> client.admin().cluster().clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 40a30342b86b9..1c4a77905d73f 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -32,16 +32,38 @@ package org.opensearch.action.admin.cluster.stats; +import org.opensearch.Build; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodeStatsTests; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.fielddata.FieldDataStats; +import org.opensearch.index.flush.FlushStats; +import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.StoreStats; +import org.opensearch.search.suggest.completion.CompletionStats; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; import 
java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
@@ -158,6 +180,253 @@ public void testIngestStats() throws Exception {
 );
 }

+ public void testMultiVersionScenarioWithAggregatedNodeLevelStats() {
+ // The default (non-aggregated) response mimics what a node on a version prior to the aggregated
+ // node level stats change returns
+ int numberOfNodes = randomIntBetween(1, 4);
+ Index testIndex = new Index("test-index", "_na_");
+
+ List<ClusterStatsNodeResponse> defaultClusterStatsNodeResponses = new ArrayList<>();
+ List<ClusterStatsNodeResponse> aggregatedNodeLevelClusterStatsNodeResponses = new ArrayList<>();
+
+ for (int i = 0; i < numberOfNodes; i++) {
+ DiscoveryNode node = new DiscoveryNode("node-" + i, buildNewFakeTransportAddress(), Version.CURRENT);
+ CommonStats commonStats = createRandomCommonStats();
+ ShardStats[] shardStats = createshardStats(node, testIndex, commonStats);
+ ClusterStatsNodeResponse customClusterStatsResponse = createClusterStatsNodeResponse(node, shardStats, testIndex, true, false);
+ ClusterStatsNodeResponse customNodeLevelAggregatedClusterStatsResponse = createClusterStatsNodeResponse(
+ node,
+ shardStats,
+ testIndex,
+ false,
+ true
+ );
+ defaultClusterStatsNodeResponses.add(customClusterStatsResponse);
+ aggregatedNodeLevelClusterStatsNodeResponses.add(customNodeLevelAggregatedClusterStatsResponse);
+ }
+
+ ClusterStatsIndices defaultClusterStatsIndices = new ClusterStatsIndices(defaultClusterStatsNodeResponses, null, null);
+ ClusterStatsIndices aggregatedNodeLevelClusterStatsIndices = new ClusterStatsIndices(
+ aggregatedNodeLevelClusterStatsNodeResponses,
+ null,
+ null
+ );
+
+ assertClusterStatsIndicesEqual(defaultClusterStatsIndices, aggregatedNodeLevelClusterStatsIndices);
+ }
+
+ public void assertClusterStatsIndicesEqual(ClusterStatsIndices first, ClusterStatsIndices second) {
+ assertEquals(first.getIndexCount(), second.getIndexCount());
+
+ assertEquals(first.getShards().getIndices(), second.getShards().getIndices());
+ assertEquals(first.getShards().getTotal(), second.getShards().getTotal());
+ assertEquals(first.getShards().getPrimaries(), second.getShards().getPrimaries());
+ assertEquals(first.getShards().getMinIndexShards(), second.getShards().getMinIndexShards());
+ assertEquals(first.getShards().getMaxIndexShards(), second.getShards().getMaxIndexShards());
+ assertEquals(first.getShards().getMinIndexPrimaryShards(), second.getShards().getMinIndexPrimaryShards());
+
+ // assertEquals on doubles without a delta is deprecated, so the floating-point metrics are compared directly
+ assertTrue(first.getShards().getReplication() == second.getShards().getReplication());
+ assertTrue(first.getShards().getAvgIndexShards() == second.getShards().getAvgIndexShards());
+ assertTrue(first.getShards().getMaxIndexPrimaryShards() == second.getShards().getMaxIndexPrimaryShards());
+ assertTrue(first.getShards().getAvgIndexPrimaryShards() == second.getShards().getAvgIndexPrimaryShards());
+ assertTrue(first.getShards().getMinIndexReplication() == second.getShards().getMinIndexReplication());
+ assertTrue(first.getShards().getAvgIndexReplication() == second.getShards().getAvgIndexReplication());
+ assertTrue(first.getShards().getMaxIndexReplication() == second.getShards().getMaxIndexReplication());
+
+ // Docs stats
+ assertEquals(first.getDocs().getAverageSizeInBytes(), second.getDocs().getAverageSizeInBytes());
+ assertEquals(first.getDocs().getDeleted(), second.getDocs().getDeleted());
+ assertEquals(first.getDocs().getCount(), second.getDocs().getCount());
+ assertEquals(first.getDocs().getTotalSizeInBytes(), second.getDocs().getTotalSizeInBytes());
+
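+ // The sections below compare each aggregated metric getter-by-getter, since ClusterStatsIndices
+ // does not expose a value-based equals.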
+ // Store Stats + assertEquals(first.getStore().getSizeInBytes(), second.getStore().getSizeInBytes()); + assertEquals(first.getStore().getSize(), second.getStore().getSize()); + assertEquals(first.getStore().getReservedSize(), second.getStore().getReservedSize()); + + // Query Cache + assertEquals(first.getQueryCache().getCacheCount(), second.getQueryCache().getCacheCount()); + assertEquals(first.getQueryCache().getCacheSize(), second.getQueryCache().getCacheSize()); + assertEquals(first.getQueryCache().getEvictions(), second.getQueryCache().getEvictions()); + assertEquals(first.getQueryCache().getHitCount(), second.getQueryCache().getHitCount()); + assertEquals(first.getQueryCache().getTotalCount(), second.getQueryCache().getTotalCount()); + assertEquals(first.getQueryCache().getMissCount(), second.getQueryCache().getMissCount()); + assertEquals(first.getQueryCache().getMemorySize(), second.getQueryCache().getMemorySize()); + assertEquals(first.getQueryCache().getMemorySizeInBytes(), second.getQueryCache().getMemorySizeInBytes()); + + // Completion Stats + assertEquals(first.getCompletion().getSizeInBytes(), second.getCompletion().getSizeInBytes()); + assertEquals(first.getCompletion().getSize(), second.getCompletion().getSize()); + + // Segment Stats + assertEquals(first.getSegments().getBitsetMemory(), second.getSegments().getBitsetMemory()); + assertEquals(first.getSegments().getCount(), second.getSegments().getCount()); + assertEquals(first.getSegments().getBitsetMemoryInBytes(), second.getSegments().getBitsetMemoryInBytes()); + assertEquals(first.getSegments().getFileSizes(), second.getSegments().getFileSizes()); + assertEquals(first.getSegments().getIndexWriterMemoryInBytes(), second.getSegments().getIndexWriterMemoryInBytes()); + assertEquals(first.getSegments().getVersionMapMemory(), second.getSegments().getVersionMapMemory()); + assertEquals(first.getSegments().getVersionMapMemoryInBytes(), second.getSegments().getVersionMapMemoryInBytes()); + } + + public void testNodeIndexShardStatsSuccessfulSerializationDeserialization() throws IOException { + Index testIndex = new Index("test-index", "_na_"); + + DiscoveryNode node = new DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT); + CommonStats commonStats = createRandomCommonStats(); + ShardStats[] shardStats = createshardStats(node, testIndex, commonStats); + ClusterStatsNodeResponse aggregatedNodeLevelClusterStatsNodeResponse = createClusterStatsNodeResponse( + node, + shardStats, + testIndex, + false, + true + ); + + BytesStreamOutput out = new BytesStreamOutput(); + aggregatedNodeLevelClusterStatsNodeResponse.writeTo(out); + StreamInput in = out.bytes().streamInput(); + + ClusterStatsNodeResponse newClusterStatsNodeRequest = new ClusterStatsNodeResponse(in); + + ClusterStatsIndices beforeSerialization = new ClusterStatsIndices(List.of(aggregatedNodeLevelClusterStatsNodeResponse), null, null); + ClusterStatsIndices afterSerialization = new ClusterStatsIndices(List.of(newClusterStatsNodeRequest), null, null); + + assertClusterStatsIndicesEqual(beforeSerialization, afterSerialization); + + } + + private ClusterStatsNodeResponse createClusterStatsNodeResponse( + DiscoveryNode node, + ShardStats[] shardStats, + Index index, + boolean defaultBehavior, + boolean aggregateNodeLevelStats + ) { + NodeInfo nodeInfo = new NodeInfo( + Version.CURRENT, + Build.CURRENT, + node, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + NodeStats nodeStats = new NodeStats( + node, 
+ randomNonNegativeLong(), + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + if (defaultBehavior) { + return new ClusterStatsNodeResponse(node, null, nodeInfo, nodeStats, shardStats); + } else { + return new ClusterStatsNodeResponse(node, null, nodeInfo, nodeStats, shardStats, aggregateNodeLevelStats); + } + + } + + private CommonStats createRandomCommonStats() { + CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE); + commonStats.docs = new DocsStats(randomLongBetween(0, 10000), randomLongBetween(0, 100), randomLongBetween(0, 1000)); + commonStats.store = new StoreStats(randomLongBetween(0, 100), randomLongBetween(0, 1000)); + commonStats.indexing = new IndexingStats(); + commonStats.completion = new CompletionStats(); + commonStats.flush = new FlushStats(randomLongBetween(0, 100), randomLongBetween(0, 100), randomLongBetween(0, 100)); + commonStats.fieldData = new FieldDataStats(randomLongBetween(0, 100), randomLongBetween(0, 100), null); + commonStats.queryCache = new QueryCacheStats( + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100) + ); + commonStats.segments = new SegmentsStats(); + + return commonStats; + } + + private ShardStats[] createshardStats(DiscoveryNode localNode, Index index, CommonStats commonStats) { + List shardStatsList = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + ShardRoutingState shardRoutingState = ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)); + ShardRouting shardRouting = TestShardRouting.newShardRouting( + index.getName(), + i, + localNode.getId(), + randomBoolean(), + shardRoutingState + ); + + Path path = createTempDir().resolve("indices") + .resolve(shardRouting.shardId().getIndex().getUUID()) + .resolve(String.valueOf(shardRouting.shardId().id())); + + ShardStats shardStats = new ShardStats( + shardRouting, + new ShardPath(false, path, path, shardRouting.shardId()), + commonStats, + null, + null, + null + ); + shardStatsList.add(shardStats); + } + + return shardStatsList.toArray(new ShardStats[0]); + } + + private class MockShardStats extends ClusterStatsIndices.ShardStats { + public boolean equals(ClusterStatsIndices.ShardStats shardStats) { + return this.getIndices() == shardStats.getIndices() + && this.getTotal() == shardStats.getTotal() + && this.getPrimaries() == shardStats.getPrimaries() + && this.getReplication() == shardStats.getReplication() + && this.getMaxIndexShards() == shardStats.getMaxIndexShards() + && this.getMinIndexShards() == shardStats.getMinIndexShards() + && this.getAvgIndexShards() == shardStats.getAvgIndexShards() + && this.getMaxIndexPrimaryShards() == shardStats.getMaxIndexPrimaryShards() + && this.getMinIndexPrimaryShards() == shardStats.getMinIndexPrimaryShards() + && this.getAvgIndexPrimaryShards() == shardStats.getAvgIndexPrimaryShards() + && this.getMinIndexReplication() == shardStats.getMinIndexReplication() + && this.getAvgIndexReplication() == shardStats.getAvgIndexReplication() + && this.getMaxIndexReplication() == shardStats.getMaxIndexReplication(); + } + } + private static NodeInfo createNodeInfo(String nodeId, String transportType, String httpType) { Settings.Builder settings = Settings.builder(); if (transportType != null) { From 349708198d01f205293d0ee5ca0bdae7b9ffd76a Mon Sep 17 00:00:00 2001 From: Gaurav Bafna 
<85113518+gbbafna@users.noreply.github.com>
Date: Tue, 23 Jul 2024 21:57:47 +0530
Subject: [PATCH 05/68] Fix constraint bug which allows more primary shards than average primary shards per index (#14908)

With `>`, a node holding exactly the allowed number of primaries (the
ceiling of the per-index average) was not treated as breaching the
constraint, so it could still be assigned one more primary shard than
the average; `>=` marks the node as breaching as soon as it reaches
that count.

Signed-off-by: Gaurav Bafna
---
 .../opensearch/cluster/routing/allocation/ConstraintTypes.java | 2 +-
 .../cluster/routing/allocation/AllocationConstraintsTests.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
index 08fe8f92d1f80..28ad199218884 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
@@ -70,7 +70,7 @@ public static Predicate isPerIndexPrimaryShardsPerN
         return (params) -> {
             int perIndexPrimaryShardCount = params.getNode().numPrimaryShards(params.getIndex());
             int perIndexAllowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode(params.getIndex()));
-            return perIndexPrimaryShardCount > perIndexAllowedPrimaryShardCount;
+            return perIndexPrimaryShardCount >= perIndexAllowedPrimaryShardCount;
         };
     }
 
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java
index 90546620e9e3e..4c9fcd1650664 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java
@@ -93,7 +93,7 @@ public void testPerIndexPrimaryShardsConstraint() {
 
         assertEquals(0, constraints.weight(balancer, node, indexName));
 
-        perIndexPrimaryShardCount = 3;
+        perIndexPrimaryShardCount = 2;
         when(node.numPrimaryShards(anyString())).thenReturn(perIndexPrimaryShardCount);
 
         assertEquals(CONSTRAINT_WEIGHT, constraints.weight(balancer, node, indexName));

From e46d1d8685a9b90a1f25920989e567373ee23284 Mon Sep 17 00:00:00 2001
From: rishavz_sagar
Date: Tue, 23 Jul 2024 22:27:45 +0530
Subject: [PATCH 06/68] Optimising AwarenessAllocationDecider for hashmap.get call (#14761)

Signed-off-by: RS146BIJAY
---
 .../decider/AwarenessAllocationDecider.java | 91 ++++++++++++-------
 1 file changed, 58 insertions(+), 33 deletions(-)

diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
index 5344d95b217a7..16c94acfbb553 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
@@ -111,7 +111,6 @@ public class AwarenessAllocationDecider extends AllocationDecider {
     );
 
     private volatile List<String> awarenessAttributes;
-
     private volatile Map<String, List<String>> forcedAwarenessAttributes;
 
     public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
@@ -163,8 +162,8 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout
         IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index());
         int shardCount = indexMetadata.getNumberOfReplicas() + 1; // 1 for primary
         for (String awarenessAttribute : awarenessAttributes) {
-            // the node the shard
exists on must be associated with an awareness attribute - if (node.node().getAttributes().containsKey(awarenessAttribute) == false) { + // the node the shard exists on must be associated with an awareness attribute. + if (isAwarenessAttributeAssociatedWithNode(node, awarenessAttribute) == false) { return allocation.decision( Decision.NO, NAME, @@ -175,36 +174,10 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout ); } + int currentNodeCount = getCurrentNodeCountForAttribute(shardRouting, node, allocation, moveToNode, awarenessAttribute); + // build attr_value -> nodes map Set nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute); - - // build the count of shards per attribute value - Map shardPerAttribute = new HashMap<>(); - for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting.shardId())) { - if (assignedShard.started() || assignedShard.initializing()) { - // Note: this also counts relocation targets as that will be the new location of the shard. - // Relocation sources should not be counted as the shard is moving away - RoutingNode routingNode = allocation.routingNodes().node(assignedShard.currentNodeId()); - shardPerAttribute.merge(routingNode.node().getAttributes().get(awarenessAttribute), 1, Integer::sum); - } - } - - if (moveToNode) { - if (shardRouting.assignedToNode()) { - String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId(); - if (node.nodeId().equals(nodeId) == false) { - // we work on different nodes, move counts around - shardPerAttribute.compute( - allocation.routingNodes().node(nodeId).node().getAttributes().get(awarenessAttribute), - (k, v) -> (v == null) ? 0 : v - 1 - ); - shardPerAttribute.merge(node.node().getAttributes().get(awarenessAttribute), 1, Integer::sum); - } - } else { - shardPerAttribute.merge(node.node().getAttributes().get(awarenessAttribute), 1, Integer::sum); - } - } - int numberOfAttributes = nodesPerAttribute.size(); List fullValues = forcedAwarenessAttributes.get(awarenessAttribute); @@ -216,9 +189,8 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout } numberOfAttributes = attributesSet.size(); } - // TODO should we remove ones that are not part of full list? - final int currentNodeCount = shardPerAttribute.get(node.node().getAttributes().get(awarenessAttribute)); + // TODO should we remove ones that are not part of full list? 
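[Editor's note: the following is not part of the patch. It is a minimal, self-contained
sketch, with hypothetical names, of the optimisation this commit applies: the old code
built a HashMap of attribute value to shard count on every decider invocation, while the
new code keeps a single counter and compares each shard copy's attribute value against
the target node's value, avoiding the map allocation and hashmap.get calls.]

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class AwarenessCountSketch {
        // before (simplified): allocate and fill a map, then read one entry back
        static int countViaMap(List<String> shardZones, String targetZone) {
            Map<String, Integer> perZone = new HashMap<>();
            for (String zone : shardZones) {
                perZone.merge(zone, 1, Integer::sum);
            }
            return perZone.getOrDefault(targetZone, 0);
        }

        // after (simplified): no allocation, count matches for the one zone we need
        static int countDirect(List<String> shardZones, String targetZone) {
            int count = 0;
            for (String zone : shardZones) {
                if (targetZone.equals(zone)) {
                    count++;
                }
            }
            return count;
        }

        public static void main(String[] args) {
            List<String> zones = List.of("a", "b", "a", "c");
            System.out.println(countViaMap(zones, "a") + " == " + countDirect(zones, "a")); // 2 == 2
        }
    }
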
final int maximumNodeCount = (shardCount + numberOfAttributes - 1) / numberOfAttributes; // ceil(shardCount/numberOfAttributes) if (currentNodeCount > maximumNodeCount) { return allocation.decision( @@ -238,4 +210,57 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout return allocation.decision(Decision.YES, NAME, "node meets all awareness attribute requirements"); } + + private int getCurrentNodeCountForAttribute( + ShardRouting shardRouting, + RoutingNode node, + RoutingAllocation allocation, + boolean moveToNode, + String awarenessAttribute + ) { + // build the count of shards per attribute value + final String shardAttributeForNode = getAttributeValueForNode(node, awarenessAttribute); + int currentNodeCount = 0; + final List assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId()); + + for (ShardRouting assignedShard : assignedShards) { + if (assignedShard.started() || assignedShard.initializing()) { + // Note: this also counts relocation targets as that will be the new location of the shard. + // Relocation sources should not be counted as the shard is moving away + RoutingNode routingNode = allocation.routingNodes().node(assignedShard.currentNodeId()); + // Increase node count when + if (getAttributeValueForNode(routingNode, awarenessAttribute).equals(shardAttributeForNode)) { + ++currentNodeCount; + } + } + } + + if (moveToNode) { + if (shardRouting.assignedToNode()) { + String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId(); + if (node.nodeId().equals(nodeId) == false) { + // we work on different nodes, move counts around + if (getAttributeValueForNode(allocation.routingNodes().node(nodeId), awarenessAttribute).equals(shardAttributeForNode) + && currentNodeCount > 0) { + --currentNodeCount; + } + + ++currentNodeCount; + } + } else { + ++currentNodeCount; + } + } + + return currentNodeCount; + } + + private boolean isAwarenessAttributeAssociatedWithNode(RoutingNode node, String awarenessAttribute) { + return node.node().getAttributes().containsKey(awarenessAttribute); + } + + private String getAttributeValueForNode(final RoutingNode node, final String awarenessAttribute) { + return node.node().getAttributes().get(awarenessAttribute); + } + } From 087355f0ee676064ea409ed68090b33e568ea941 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 23 Jul 2024 14:26:22 -0500 Subject: [PATCH 07/68] Fix IngestServiceTests.testBulkRequestExecutionWithFailures (#14918) The test would previously fail if the randomness led to only a single indexing request being included in the bulk payload. This change guarantees multiple indexing requests in order to ensure the batch logic kicks in. Also replace some unneeded mocks with real classes. 
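[Editor's note: a minimal illustration, separate from the patch, of the mock-to-real-class
swap described above; the class and names here are hypothetical. As the diff below shows,
a plain HashMap can stand in for a mocked BiConsumer handler and be asserted on directly:]

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.BiConsumer;

    class HandlerWithoutMockExample {
        public static void main(String[] args) {
            Map<Integer, Exception> errors = new HashMap<>();
            // a method reference on a real map replaces mock(BiConsumer.class) + verify(...)
            BiConsumer<Integer, Exception> errorHandler = errors::put;
            errorHandler.accept(0, new RuntimeException("boom"));
            if (errors.size() != 1) {
                throw new AssertionError("expected exactly one recorded failure");
            }
        }
    }
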
Signed-off-by: Andrew Ross --- .../opensearch/ingest/IngestServiceTests.java | 47 +++++++++---------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 9d03127692975..166b94966196c 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -78,6 +78,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.hamcrest.MatcherAssert; import org.junit.Before; import java.nio.charset.StandardCharsets; @@ -104,15 +105,16 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; @@ -1106,27 +1108,23 @@ public void testExecuteFailureWithNestedOnFailure() throws Exception { verify(completionHandler, times(1)).accept(Thread.currentThread(), null); } - public void testBulkRequestExecutionWithFailures() throws Exception { + public void testBulkRequestExecutionWithFailures() { BulkRequest bulkRequest = new BulkRequest(); String pipelineId = "_id"; - int numRequest = scaledRandomIntBetween(8, 64); - int numIndexRequests = 0; - for (int i = 0; i < numRequest; i++) { - DocWriteRequest request; + int numIndexRequests = scaledRandomIntBetween(4, 32); + for (int i = 0; i < numIndexRequests; i++) { + IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); + bulkRequest.add(indexRequest); + } + int numOtherRequests = scaledRandomIntBetween(4, 32); + for (int i = 0; i < numOtherRequests; i++) { if (randomBoolean()) { - if (randomBoolean()) { - request = new DeleteRequest("_index", "_id"); - } else { - request = new UpdateRequest("_index", "_id"); - } + bulkRequest.add(new DeleteRequest("_index", "_id")); } else { - IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); - indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - request = indexRequest; - numIndexRequests++; + bulkRequest.add(new UpdateRequest("_index", "_id")); } - bulkRequest.add(request); } CompoundProcessor processor = mock(CompoundProcessor.class); @@ -1155,23 +1153,22 @@ public void testBulkRequestExecutionWithFailures() throws Exception { clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final BiConsumer completionHandler = mock(BiConsumer.class); + final Map errorHandler = new 
HashMap<>(); + final Map completionHandler = new HashMap<>(); ingestService.executeBulkRequest( - numRequest, + numIndexRequests + numOtherRequests, bulkRequest.requests(), - requestItemErrorHandler, - completionHandler, + errorHandler::put, + completionHandler::put, indexReq -> {}, Names.WRITE, bulkRequest ); - verify(requestItemErrorHandler, times(numIndexRequests)).accept(anyInt(), argThat(o -> o.getCause().equals(error))); + MatcherAssert.assertThat(errorHandler.entrySet(), hasSize(numIndexRequests)); + errorHandler.values().forEach(e -> assertEquals(e.getCause(), error)); - verify(completionHandler, times(1)).accept(Thread.currentThread(), null); + MatcherAssert.assertThat(completionHandler.keySet(), contains(Thread.currentThread())); } public void testBulkRequestExecution() throws Exception { From 312de9947b8848150743623009e8d4b95487e911 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Wed, 24 Jul 2024 08:54:27 +0530 Subject: [PATCH 08/68] [Star tree] Star tree merge changes (#14652) --------- Signed-off-by: Bharathwaj G --- .../composite/Composite99DocValuesReader.java | 10 +- .../composite/Composite99DocValuesWriter.java | 97 +- .../composite/CompositeIndexFieldInfo.java | 37 + .../codec/composite/CompositeIndexReader.java | 5 +- .../datacube/startree/StarTreeValues.java | 47 +- .../aggregators/CountValueAggregator.java | 11 +- .../aggregators/MetricAggregatorInfo.java | 21 +- .../aggregators/SumValueAggregator.java | 17 +- .../startree/aggregators/ValueAggregator.java | 6 +- .../aggregators/ValueAggregatorFactory.java | 9 +- .../startree/builder/BaseStarTreeBuilder.java | 258 +- .../builder/OnHeapStarTreeBuilder.java | 148 +- .../startree/builder/StarTreeBuilder.java | 18 +- .../StarTreeDocValuesIteratorAdapter.java | 82 - .../startree/builder/StarTreesBuilder.java | 61 +- .../datacube/startree/node/StarTreeNode.java | 112 + .../datacube/startree/node/package-info.java | 12 + .../utils/SequentialDocValuesIterator.java | 109 +- .../mapper/CompositeMappedFieldType.java | 4 + .../StarTreeDocValuesFormatTests.java | 172 +- .../CountValueAggregatorTests.java | 8 +- .../MetricAggregatorInfoTests.java | 34 +- .../aggregators/SumValueAggregatorTests.java | 15 +- .../ValueAggregatorFactoryTests.java | 2 +- .../builder/AbstractStarTreeBuilderTests.java | 2251 +++++++++++++++++ .../builder/BaseStarTreeBuilderTests.java | 25 +- .../builder/OnHeapStarTreeBuilderTests.java | 696 +---- ...StarTreeDocValuesIteratorAdapterTests.java | 139 - .../StarTreeValuesIteratorFactoryTests.java | 131 - .../builder/StarTreesBuilderTests.java | 14 +- .../SequentialDocValuesIteratorTests.java | 131 +- .../org/opensearch/index/MapperTestUtils.java | 34 + 32 files changed, 3281 insertions(+), 1435 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexFieldInfo.java delete mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/package-info.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java delete mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java delete mode 100644 
server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java index 82c844088cfd4..df5008a7f294e 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java @@ -17,9 +17,9 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.index.mapper.CompositeMappedFieldType; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -74,15 +74,13 @@ public void close() throws IOException { } @Override - public List getCompositeIndexFields() { + public List getCompositeIndexFields() { // todo : read from file formats and get the field names. - throw new UnsupportedOperationException(); - + return new ArrayList<>(); } @Override - public CompositeIndexValues getCompositeIndexValues(String field, CompositeMappedFieldType.CompositeFieldType fieldType) - throws IOException { + public CompositeIndexValues getCompositeIndexValues(CompositeIndexFieldInfo compositeIndexFieldInfo) throws IOException { // TODO : read compositeIndexValues [starTreeValues] from star tree files throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java index 3753b20a8bea3..3859d3c998573 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java @@ -8,20 +8,29 @@ package org.opensearch.index.codec.composite; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.MergeState; import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.SortedNumericDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.StarTreeMapper; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -40,8 +49,10 @@ public class Composite99DocValuesWriter extends DocValuesConsumer { AtomicReference mergeState = new AtomicReference<>(); private final Set compositeMappedFieldTypes; private final Set compositeFieldSet; + private final Set segmentFieldSet; private 
final Map fieldProducerMap = new HashMap<>(); + private static final Logger logger = LogManager.getLogger(Composite99DocValuesWriter.class); public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState segmentWriteState, MapperService mapperService) { @@ -50,6 +61,12 @@ public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState this.mapperService = mapperService; this.compositeMappedFieldTypes = mapperService.getCompositeFieldTypes(); compositeFieldSet = new HashSet<>(); + segmentFieldSet = new HashSet<>(); + for (FieldInfo fi : segmentWriteState.fieldInfos) { + if (DocValuesType.SORTED_NUMERIC.equals(fi.getDocValuesType())) { + segmentFieldSet.add(fi.name); + } + } for (CompositeMappedFieldType type : compositeMappedFieldTypes) { compositeFieldSet.addAll(type.fields()); } @@ -95,23 +112,91 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, fieldProducerMap.put(field.name, valuesProducer); compositeFieldSet.remove(field.name); } + segmentFieldSet.remove(field.name); + if (segmentFieldSet.isEmpty()) { + Set compositeFieldSetCopy = new HashSet<>(compositeFieldSet); + for (String compositeField : compositeFieldSetCopy) { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) { + return DocValues.emptySortedNumeric(); + } + }); + compositeFieldSet.remove(compositeField); + } + } // we have all the required fields to build composite fields if (compositeFieldSet.isEmpty()) { for (CompositeMappedFieldType mappedType : compositeMappedFieldTypes) { - if (mappedType instanceof StarTreeMapper.StarTreeFieldType) { - try (StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, state, mapperService)) { - starTreesBuilder.build(); + if (mappedType.getCompositeIndexType().equals(CompositeMappedFieldType.CompositeFieldType.STAR_TREE)) { + try (StarTreesBuilder starTreesBuilder = new StarTreesBuilder(state, mapperService)) { + starTreesBuilder.build(fieldProducerMap); } } } } + } @Override public void merge(MergeState mergeState) throws IOException { this.mergeState.compareAndSet(null, mergeState); super.merge(mergeState); - // TODO : handle merge star tree - // mergeStarTreeFields(mergeState); + mergeCompositeFields(mergeState); + } + + /** + * Merges composite fields from multiple segments + * @param mergeState merge state + */ + private void mergeCompositeFields(MergeState mergeState) throws IOException { + mergeStarTreeFields(mergeState); + } + + /** + * Merges star tree data fields from multiple segments + * @param mergeState merge state + */ + private void mergeStarTreeFields(MergeState mergeState) throws IOException { + Map> starTreeSubsPerField = new HashMap<>(); + StarTreeField starTreeField = null; + for (int i = 0; i < mergeState.docValuesProducers.length; i++) { + CompositeIndexReader reader = null; + if (mergeState.docValuesProducers[i] == null) { + continue; + } + if (mergeState.docValuesProducers[i] instanceof CompositeIndexReader) { + reader = (CompositeIndexReader) mergeState.docValuesProducers[i]; + } else { + continue; + } + + List compositeFieldInfo = reader.getCompositeIndexFields(); + for (CompositeIndexFieldInfo fieldInfo : compositeFieldInfo) { + if (fieldInfo.getType().equals(CompositeMappedFieldType.CompositeFieldType.STAR_TREE)) { + CompositeIndexValues compositeIndexValues = reader.getCompositeIndexValues(fieldInfo); + if (compositeIndexValues instanceof StarTreeValues) { + StarTreeValues 
starTreeValues = (StarTreeValues) compositeIndexValues; + List fieldsList = starTreeSubsPerField.getOrDefault(fieldInfo.getField(), Collections.emptyList()); + if (starTreeField == null) { + starTreeField = starTreeValues.getStarTreeField(); + } + // assert star tree configuration is same across segments + else { + if (starTreeField.equals(starTreeValues.getStarTreeField()) == false) { + throw new IllegalArgumentException( + "star tree field configuration must match the configuration of the field being merged" + ); + } + } + fieldsList.add(starTreeValues); + starTreeSubsPerField.put(fieldInfo.getField(), fieldsList); + } + } + } + } + try (StarTreesBuilder starTreesBuilder = new StarTreesBuilder(state, mapperService)) { + starTreesBuilder.buildDuringMerge(starTreeSubsPerField); + } } } diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexFieldInfo.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexFieldInfo.java new file mode 100644 index 0000000000000..8193fcc301e67 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexFieldInfo.java @@ -0,0 +1,37 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.mapper.CompositeMappedFieldType; + +/** + * Field info details of composite index fields + * + * @opensearch.experimental + */ +@ExperimentalApi +public class CompositeIndexFieldInfo { + private final String field; + private final CompositeMappedFieldType.CompositeFieldType type; + + public CompositeIndexFieldInfo(String field, CompositeMappedFieldType.CompositeFieldType type) { + this.field = field; + this.type = type; + } + + public String getField() { + return field; + } + + public CompositeMappedFieldType.CompositeFieldType getType() { + return type; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java index d02438b75377d..a159b0619bcbb 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeIndexReader.java @@ -9,7 +9,6 @@ package org.opensearch.index.codec.composite; import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.index.mapper.CompositeMappedFieldType; import java.io.IOException; import java.util.List; @@ -25,10 +24,10 @@ public interface CompositeIndexReader { * Get list of composite index fields from the segment * */ - List getCompositeIndexFields(); + List getCompositeIndexFields(); /** * Get composite index values based on the field name and the field type */ - CompositeIndexValues getCompositeIndexValues(String field, CompositeMappedFieldType.CompositeFieldType fieldType) throws IOException; + CompositeIndexValues getCompositeIndexValues(CompositeIndexFieldInfo fieldInfo) throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java b/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java index 2a5b96ce2620a..8378a4063b7ca 100644 --- 
a/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeValues.java @@ -8,10 +8,13 @@ package org.opensearch.index.codec.composite.datacube.startree; +import org.apache.lucene.search.DocIdSetIterator; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.codec.composite.CompositeIndexValues; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; -import java.util.List; +import java.util.Map; /** * Concrete class that holds the star tree associated values from the segment @@ -20,16 +23,48 @@ */ @ExperimentalApi public class StarTreeValues implements CompositeIndexValues { - private final List dimensionsOrder; + private final StarTreeField starTreeField; + private final StarTreeNode root; + private final Map dimensionDocValuesIteratorMap; + private final Map metricDocValuesIteratorMap; + private final Map attributes; - // TODO : come up with full set of vales such as dimensions and metrics doc values + star tree - public StarTreeValues(List dimensionsOrder) { - super(); - this.dimensionsOrder = List.copyOf(dimensionsOrder); + public StarTreeValues( + StarTreeField starTreeField, + StarTreeNode root, + Map dimensionDocValuesIteratorMap, + Map metricDocValuesIteratorMap, + Map attributes + ) { + this.starTreeField = starTreeField; + this.root = root; + this.dimensionDocValuesIteratorMap = dimensionDocValuesIteratorMap; + this.metricDocValuesIteratorMap = metricDocValuesIteratorMap; + this.attributes = attributes; } @Override public CompositeIndexValues getValues() { return this; } + + public StarTreeField getStarTreeField() { + return starTreeField; + } + + public StarTreeNode getRoot() { + return root; + } + + public Map getDimensionDocValuesIteratorMap() { + return dimensionDocValuesIteratorMap; + } + + public Map getMetricDocValuesIteratorMap() { + return metricDocValuesIteratorMap; + } + + public Map getAttributes() { + return attributes; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java index d72f4a292dc0a..5390b6728b9b6 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java @@ -18,6 +18,11 @@ public class CountValueAggregator implements ValueAggregator { public static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; public static final long DEFAULT_INITIAL_VALUE = 1L; + private StarTreeNumericType starTreeNumericType; + + public CountValueAggregator(StarTreeNumericType starTreeNumericType) { + this.starTreeNumericType = starTreeNumericType; + } @Override public MetricStat getAggregationType() { @@ -30,12 +35,12 @@ public StarTreeNumericType getAggregatedValueType() { } @Override - public Long getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + public Long getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue) { return DEFAULT_INITIAL_VALUE; } @Override - public Long mergeAggregatedValueAndSegmentValue(Long value, Long segmentDocValue, 
StarTreeNumericType starTreeNumericType) { + public Long mergeAggregatedValueAndSegmentValue(Long value, Long segmentDocValue) { return value + 1; } @@ -60,7 +65,7 @@ public Long toLongValue(Long value) { } @Override - public Long toStarTreeNumericTypeValue(Long value, StarTreeNumericType type) { + public Long toStarTreeNumericTypeValue(Long value) { return value; } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java index 46f1b1ac11063..a9209a38eca82 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfo.java @@ -9,7 +9,6 @@ import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; -import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.fielddata.IndexNumericFieldData; import java.util.Comparator; @@ -17,7 +16,6 @@ /** * Builds aggregation function and doc values field pair to support various aggregations - * * @opensearch.experimental */ public class MetricAggregatorInfo implements Comparable { @@ -29,22 +27,14 @@ public class MetricAggregatorInfo implements Comparable { private final String field; private final ValueAggregator valueAggregators; private final StarTreeNumericType starTreeNumericType; - private final SequentialDocValuesIterator metricStatReader; /** * Constructor for MetricAggregatorInfo */ - public MetricAggregatorInfo( - MetricStat metricStat, - String field, - String starFieldName, - IndexNumericFieldData.NumericType numericType, - SequentialDocValuesIterator metricStatReader - ) { + public MetricAggregatorInfo(MetricStat metricStat, String field, String starFieldName, IndexNumericFieldData.NumericType numericType) { this.metricStat = metricStat; - this.valueAggregators = ValueAggregatorFactory.getValueAggregator(metricStat); this.starTreeNumericType = StarTreeNumericType.fromNumericType(numericType); - this.metricStatReader = metricStatReader; + this.valueAggregators = ValueAggregatorFactory.getValueAggregator(metricStat, this.starTreeNumericType); this.field = field; this.starFieldName = starFieldName; this.metric = toFieldName(); @@ -85,13 +75,6 @@ public StarTreeNumericType getAggregatedValueType() { return starTreeNumericType; } - /** - * @return metric value reader iterator - */ - public SequentialDocValuesIterator getMetricStatReader() { - return metricStatReader; - } - /** * @return field name with metric type and field */ diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java index 543b0f7f42374..385549216e4d6 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java @@ -24,6 +24,12 @@ public class SumValueAggregator implements ValueAggregator { private double compensation = 0; private CompensatedSum kahanSummation = new CompensatedSum(0, 0); + private 
StarTreeNumericType starTreeNumericType; + + public SumValueAggregator(StarTreeNumericType starTreeNumericType) { + this.starTreeNumericType = starTreeNumericType; + } + @Override public MetricStat getAggregationType() { return MetricStat.SUM; @@ -35,7 +41,7 @@ public StarTreeNumericType getAggregatedValueType() { } @Override - public Double getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + public Double getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue) { kahanSummation.reset(0, 0); kahanSummation.add(starTreeNumericType.getDoubleValue(segmentDocValue)); compensation = kahanSummation.delta(); @@ -44,7 +50,7 @@ public Double getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, } @Override - public Double mergeAggregatedValueAndSegmentValue(Double value, Long segmentDocValue, StarTreeNumericType starTreeNumericType) { + public Double mergeAggregatedValueAndSegmentValue(Double value, Long segmentDocValue) { assert kahanSummation.value() == value; kahanSummation.reset(sum, compensation); kahanSummation.add(starTreeNumericType.getDoubleValue(segmentDocValue)); @@ -87,9 +93,12 @@ public Long toLongValue(Double value) { } @Override - public Double toStarTreeNumericTypeValue(Long value, StarTreeNumericType type) { + public Double toStarTreeNumericTypeValue(Long value) { try { - return type.getDoubleValue(value); + if (value == null) { + return 0.0; + } + return VALUE_AGGREGATOR_TYPE.getDoubleValue(value); } catch (Exception e) { throw new IllegalStateException("Cannot convert " + value + " to sortable aggregation type", e); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java index 3dd1f85845c17..93230ed012b13 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java @@ -30,12 +30,12 @@ public interface ValueAggregator { /** * Returns the initial aggregated value. */ - A getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue, StarTreeNumericType starTreeNumericType); + A getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue); /** * Applies a segment doc value to the current aggregated value. */ - A mergeAggregatedValueAndSegmentValue(A value, Long segmentDocValue, StarTreeNumericType starTreeNumericType); + A mergeAggregatedValueAndSegmentValue(A value, Long segmentDocValue); /** * Applies an aggregated value to the current aggregated value. @@ -60,5 +60,5 @@ public interface ValueAggregator { /** * Converts an aggregated value from a Long type. 
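 * [Editor's note, not part of the patch: across this commit the
 * StarTreeNumericType is injected once through each ValueAggregator
 * implementation's constructor (see CountValueAggregator and
 * SumValueAggregator above), which is why the per-call type parameter
 * is removed from methods such as this one.]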
*/ - A toStarTreeNumericTypeValue(Long rawValue, StarTreeNumericType type); + A toStarTreeNumericTypeValue(Long rawValue); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java index 4ee0b0b5b13f8..240bbd37a53ee 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java @@ -21,16 +21,17 @@ private ValueAggregatorFactory() {} /** * Returns a new instance of value aggregator for the given aggregation type. * - * @param aggregationType Aggregation type + * @param aggregationType Aggregation type + * @param starTreeNumericType Numeric type associated with star tree field ( as specified in index mapping ) * @return Value aggregator */ - public static ValueAggregator getValueAggregator(MetricStat aggregationType) { + public static ValueAggregator getValueAggregator(MetricStat aggregationType, StarTreeNumericType starTreeNumericType) { switch (aggregationType) { // other metric types (count, min, max, avg) will be supported in the future case SUM: - return new SumValueAggregator(); + return new SumValueAggregator(starTreeNumericType); case COUNT: - return new CountValueAggregator(); + return new CountValueAggregator(starTreeNumericType); default: throw new IllegalStateException("Unsupported aggregation type: " + aggregationType); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 0a363bfad8fe1..7187fade882ea 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -12,7 +12,11 @@ import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; @@ -21,7 +25,6 @@ import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo; import org.opensearch.index.compositeindex.datacube.startree.aggregators.ValueAggregator; -import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode; import org.opensearch.index.fielddata.IndexNumericFieldData; @@ -32,11 +35,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import 
java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import static org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode.ALL; @@ -54,8 +59,7 @@ public abstract class BaseStarTreeBuilder implements StarTreeBuilder { /** * Default value for star node */ - public static final int STAR_IN_DOC_VALUES_INDEX = -1; - + public static final Long STAR_IN_DOC_VALUES_INDEX = null; protected final Set skipStarNodeCreationForDimensions; protected final List metricAggregatorInfos; @@ -68,59 +72,41 @@ public abstract class BaseStarTreeBuilder implements StarTreeBuilder { protected final TreeNode rootNode = getNewNode(); - protected SequentialDocValuesIterator[] dimensionReaders; - - // We do not close these producers as they are empty doc value producers (where close() is unsupported) - protected Map fieldProducerMap; - - private final StarTreeDocValuesIteratorAdapter starTreeDocValuesIteratorAdapter; private final StarTreeField starTreeField; + private final MapperService mapperService; + private final SegmentWriteState state; + static String NUM_SEGMENT_DOCS = "numSegmentDocs"; /** * Reads all the configuration related to dimensions and metrics, builds a star-tree based on the different construction parameters. * * @param starTreeField holds the configuration for the star tree - * @param fieldProducerMap helps return the doc values iterator for each type based on field name * @param state stores the segment write state * @param mapperService helps to find the original type of the field */ - protected BaseStarTreeBuilder( - StarTreeField starTreeField, - Map fieldProducerMap, - SegmentWriteState state, - MapperService mapperService - ) throws IOException { - - logger.debug("Building in base star tree builder"); + protected BaseStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState state, MapperService mapperService) { + logger.debug("Building star tree : {}", starTreeField.getName()); this.starTreeField = starTreeField; StarTreeFieldConfiguration starTreeFieldSpec = starTreeField.getStarTreeConfig(); - this.fieldProducerMap = fieldProducerMap; - this.starTreeDocValuesIteratorAdapter = new StarTreeDocValuesIteratorAdapter(); List dimensionsSplitOrder = starTreeField.getDimensionsOrder(); this.numDimensions = dimensionsSplitOrder.size(); this.skipStarNodeCreationForDimensions = new HashSet<>(); this.totalSegmentDocs = state.segmentInfo.maxDoc(); - this.dimensionReaders = new SequentialDocValuesIterator[numDimensions]; + this.mapperService = mapperService; + this.state = state; + Set skipStarNodeCreationForDimensions = starTreeFieldSpec.getSkipStarNodeCreationInDims(); for (int i = 0; i < numDimensions; i++) { - String dimension = dimensionsSplitOrder.get(i).getField(); if (skipStarNodeCreationForDimensions.contains(dimensionsSplitOrder.get(i).getField())) { this.skipStarNodeCreationForDimensions.add(i); } - FieldInfo dimensionFieldInfos = state.fieldInfos.fieldInfo(dimension); - DocValuesType dimensionDocValuesType = dimensionFieldInfos.getDocValuesType(); - dimensionReaders[i] = starTreeDocValuesIteratorAdapter.getDocValuesIterator( - dimensionDocValuesType, - dimensionFieldInfos, - fieldProducerMap.get(dimensionFieldInfos.name) - ); } - this.metricAggregatorInfos = generateMetricAggregatorInfos(mapperService, state); + this.metricAggregatorInfos = generateMetricAggregatorInfos(mapperService); this.numMetrics = metricAggregatorInfos.size(); this.maxLeafDocuments = starTreeFieldSpec.maxLeafDocs(); } 
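[Editor's note: an aside on the SumValueAggregator change earlier in this patch, which
keeps a running CompensatedSum. The snippet below is a self-contained sketch of Kahan
(compensated) summation in general, not code from this repository:]

    class KahanSketch {
        public static void main(String[] args) {
            double sum = 0.0;
            double compensation = 0.0; // accumulates the low-order bits lost by each add
            for (int i = 0; i < 1_000_000; i++) {
                double value = 1e-3;
                double corrected = value - compensation; // re-apply previously lost bits
                double next = sum + corrected;           // big + small: low bits may drop
                compensation = (next - sum) - corrected; // what this step actually lost
                sum = next;
            }
            System.out.println(sum); // ~1000.0, with far less drift than a naive loop
        }
    }
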
@@ -130,13 +116,11 @@ protected BaseStarTreeBuilder( * * @return list of MetricAggregatorInfo */ - public List generateMetricAggregatorInfos(MapperService mapperService, SegmentWriteState state) - throws IOException { + public List generateMetricAggregatorInfos(MapperService mapperService) { List metricAggregatorInfos = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { for (MetricStat metricStat : metric.getMetrics()) { IndexNumericFieldData.NumericType numericType; - SequentialDocValuesIterator metricStatReader; Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper(metric.getField()); if (fieldMapper instanceof NumberFieldMapper) { numericType = ((NumberFieldMapper) fieldMapper).fieldType().numericType(); @@ -145,24 +129,11 @@ public List generateMetricAggregatorInfos(MapperService ma throw new IllegalStateException("unsupported mapper type"); } - FieldInfo metricFieldInfos = state.fieldInfos.fieldInfo(metric.getField()); - DocValuesType metricDocValuesType = metricFieldInfos.getDocValuesType(); - if (metricStat != MetricStat.COUNT) { - metricStatReader = starTreeDocValuesIteratorAdapter.getDocValuesIterator( - metricDocValuesType, - metricFieldInfos, - fieldProducerMap.get(metricFieldInfos.name) - ); - } else { - metricStatReader = new SequentialDocValuesIterator(); - } - MetricAggregatorInfo metricAggregatorInfo = new MetricAggregatorInfo( metricStat, metric.getField(), starTreeField.getName(), - numericType, - metricStatReader + numericType ); metricAggregatorInfos.add(metricAggregatorInfo); } @@ -204,12 +175,17 @@ public List generateMetricAggregatorInfos(MapperService ma public abstract Long getDimensionValue(int docId, int dimensionId) throws IOException; /** - * Sorts and aggregates the star-tree document in the segment, and returns a star-tree document iterator for all the - * aggregated star-tree document. + * Sorts and aggregates all the documents in the segment as per the configuration, and returns a star-tree document iterator for all the + * aggregated star-tree documents. * + * @param dimensionReaders List of docValues readers to read dimensions from the segment + * @param metricReaders List of docValues readers to read metrics from the segment * @return Iterator for the aggregated star-tree document */ - public abstract Iterator sortAndAggregateStarTreeDocuments() throws IOException; + public abstract Iterator sortAndAggregateSegmentDocuments( + SequentialDocValuesIterator[] dimensionReaders, + List metricReaders + ) throws IOException; /** * Generates aggregated star-tree documents for star-node. 
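[Editor's note on the hunk above, not part of the patch: the builder no longer constructs
its own doc-values iterators; sortAndAggregateSegmentDocuments now receives the dimension
and metric readers from its caller, which appears intended to let the same aggregation
path serve both a fresh segment build and the new mergeStarTrees flow.]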
@@ -223,13 +199,16 @@ public abstract Iterator generateStarTreeDocumentsForStarNode( throws IOException; /** - * Returns the star-tree document from the segment + * Returns the star-tree document from the segment based on the current doc id * - * @throws IOException when we are unable to build a star tree document from the segment */ - protected StarTreeDocument getSegmentStarTreeDocument(int currentDocId) throws IOException { - Long[] dimensions = getStarTreeDimensionsFromSegment(currentDocId); - Object[] metrics = getStarTreeMetricsFromSegment(currentDocId); + protected StarTreeDocument getSegmentStarTreeDocument( + int currentDocId, + SequentialDocValuesIterator[] dimensionReaders, + List metricReaders + ) throws IOException { + Long[] dimensions = getStarTreeDimensionsFromSegment(currentDocId, dimensionReaders); + Object[] metrics = getStarTreeMetricsFromSegment(currentDocId, metricReaders); return new StarTreeDocument(dimensions, metrics); } @@ -239,55 +218,48 @@ protected StarTreeDocument getSegmentStarTreeDocument(int currentDocId) throws I * @return dimension values for each of the star-tree dimension * @throws IOException when we are unable to iterate to the next doc for the given dimension readers */ - private Long[] getStarTreeDimensionsFromSegment(int currentDocId) throws IOException { + Long[] getStarTreeDimensionsFromSegment(int currentDocId, SequentialDocValuesIterator[] dimensionReaders) throws IOException { Long[] dimensions = new Long[numDimensions]; for (int i = 0; i < numDimensions; i++) { - try { - dimensions[i] = getValuesFromSegment(dimensionReaders[i], currentDocId); - } catch (Exception e) { - logger.error("unable to read the dimension values from the segment", e); - throw new IllegalStateException("unable to read the dimension values from the segment", e); + if (dimensionReaders[i] != null) { + try { + dimensionReaders[i].nextDoc(currentDocId); + } catch (IOException e) { + logger.error("unable to iterate to next doc", e); + throw new RuntimeException("unable to iterate to next doc", e); + } catch (Exception e) { + logger.error("unable to read the dimension values from the segment", e); + throw new IllegalStateException("unable to read the dimension values from the segment", e); + } + dimensions[i] = dimensionReaders[i].value(currentDocId); + } else { + throw new IllegalStateException("dimension readers are empty"); } - } return dimensions; } - /** - * Returns the next value from the iterator of respective field - * - * @param iterator respective field iterator - * @param currentDocId current document id - * @return the next value for the field - * @throws IOException when we are unable to iterate to the next doc for the given iterator - */ - private Long getValuesFromSegment(SequentialDocValuesIterator iterator, int currentDocId) throws IOException { - try { - starTreeDocValuesIteratorAdapter.nextDoc(iterator, currentDocId); - } catch (IOException e) { - logger.error("unable to iterate to next doc", e); - throw new RuntimeException("unable to iterate to next doc", e); - } - return starTreeDocValuesIteratorAdapter.getNextValue(iterator, currentDocId); - } - /** * Returns the metric values for the next document from the segment * * @return metric values for each of the star-tree metric * @throws IOException when we are unable to iterate to the next doc for the given metric readers */ - private Object[] getStarTreeMetricsFromSegment(int currentDocId) throws IOException { + private Object[] getStarTreeMetricsFromSegment(int currentDocId, List metricsReaders) throws 
IOException { Object[] metrics = new Object[numMetrics]; for (int i = 0; i < numMetrics; i++) { - SequentialDocValuesIterator metricStatReader = metricAggregatorInfos.get(i).getMetricStatReader(); + SequentialDocValuesIterator metricStatReader = metricsReaders.get(i); if (metricStatReader != null) { try { - metrics[i] = getValuesFromSegment(metricStatReader, currentDocId); + metricStatReader.nextDoc(currentDocId); + } catch (IOException e) { + logger.error("unable to iterate to next doc", e); + throw new RuntimeException("unable to iterate to next doc", e); } catch (Exception e) { logger.error("unable to read the metric values from the segment", e); throw new IllegalStateException("unable to read the metric values from the segment", e); } + metrics[i] = metricStatReader.value(currentDocId); } else { throw new IllegalStateException("metric readers are empty"); } @@ -306,7 +278,8 @@ private Object[] getStarTreeMetricsFromSegment(int currentDocId) throws IOExcept @SuppressWarnings({ "unchecked", "rawtypes" }) protected StarTreeDocument reduceSegmentStarTreeDocuments( StarTreeDocument aggregatedSegmentDocument, - StarTreeDocument segmentDocument + StarTreeDocument segmentDocument, + boolean isMerge ) { if (aggregatedSegmentDocument == null) { Long[] dimensions = Arrays.copyOf(segmentDocument.dimensions, numDimensions); @@ -314,11 +287,12 @@ protected StarTreeDocument reduceSegmentStarTreeDocuments( for (int i = 0; i < numMetrics; i++) { try { ValueAggregator metricValueAggregator = metricAggregatorInfos.get(i).getValueAggregators(); - StarTreeNumericType starTreeNumericType = metricAggregatorInfos.get(i).getAggregatedValueType(); - metrics[i] = metricValueAggregator.getInitialAggregatedValueForSegmentDocValue( - getLong(segmentDocument.metrics[i]), - starTreeNumericType - ); + if (isMerge) { + metrics[i] = metricValueAggregator.getInitialAggregatedValue(segmentDocument.metrics[i]); + } else { + metrics[i] = metricValueAggregator.getInitialAggregatedValueForSegmentDocValue(getLong(segmentDocument.metrics[i])); + } + } catch (Exception e) { logger.error("Cannot parse initial segment doc value", e); throw new IllegalStateException("Cannot parse initial segment doc value [" + segmentDocument.metrics[i] + "]"); @@ -329,12 +303,17 @@ protected StarTreeDocument reduceSegmentStarTreeDocuments( for (int i = 0; i < numMetrics; i++) { try { ValueAggregator metricValueAggregator = metricAggregatorInfos.get(i).getValueAggregators(); - StarTreeNumericType starTreeNumericType = metricAggregatorInfos.get(i).getAggregatedValueType(); - aggregatedSegmentDocument.metrics[i] = metricValueAggregator.mergeAggregatedValueAndSegmentValue( - aggregatedSegmentDocument.metrics[i], - getLong(segmentDocument.metrics[i]), - starTreeNumericType - ); + if (isMerge) { + aggregatedSegmentDocument.metrics[i] = metricValueAggregator.mergeAggregatedValues( + segmentDocument.metrics[i], + aggregatedSegmentDocument.metrics[i] + ); + } else { + aggregatedSegmentDocument.metrics[i] = metricValueAggregator.mergeAggregatedValueAndSegmentValue( + aggregatedSegmentDocument.metrics[i], + getLong(segmentDocument.metrics[i]) + ); + } } catch (Exception e) { logger.error("Cannot apply segment doc value for aggregation", e); throw new IllegalStateException("Cannot apply segment doc value for aggregation [" + segmentDocument.metrics[i] + "]"); @@ -364,7 +343,9 @@ private static long getLong(Object metric) { } if (metricValue == null) { - throw new IllegalStateException("unable to cast segment metric"); + return 0; + // TODO: handle this 
properly + // throw new IllegalStateException("unable to cast segment metric"); } return metricValue; } @@ -410,25 +391,88 @@ public StarTreeDocument reduceStarTreeDocuments(StarTreeDocument aggregatedDocum } /** - * Builds the star tree using total segment documents + * Builds the star tree from the original segment documents + * + * @param fieldProducerMap contain s the docValues producer to get docValues associated with each field * * @throws IOException when we are unable to build star-tree */ - public void build() throws IOException { + public void build(Map fieldProducerMap) throws IOException { long startTime = System.currentTimeMillis(); logger.debug("Star-tree build is a go with star tree field {}", starTreeField.getName()); - if (totalSegmentDocs == 0) { logger.debug("No documents found in the segment"); return; } - - Iterator starTreeDocumentIterator = sortAndAggregateStarTreeDocuments(); + List metricReaders = getMetricReaders(state, fieldProducerMap); + List dimensionsSplitOrder = starTreeField.getDimensionsOrder(); + SequentialDocValuesIterator[] dimensionReaders = new SequentialDocValuesIterator[dimensionsSplitOrder.size()]; + for (int i = 0; i < numDimensions; i++) { + String dimension = dimensionsSplitOrder.get(i).getField(); + FieldInfo dimensionFieldInfo = state.fieldInfos.fieldInfo(dimension); + if (dimensionFieldInfo == null) { + dimensionFieldInfo = getFieldInfo(dimension); + } + dimensionReaders[i] = new SequentialDocValuesIterator( + fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo) + ); + } + Iterator starTreeDocumentIterator = sortAndAggregateSegmentDocuments(dimensionReaders, metricReaders); logger.debug("Sorting and aggregating star-tree in ms : {}", (System.currentTimeMillis() - startTime)); build(starTreeDocumentIterator); logger.debug("Finished Building star-tree in ms : {}", (System.currentTimeMillis() - startTime)); } + private static FieldInfo getFieldInfo(String field) { + return new FieldInfo( + field, + 1, + false, + false, + false, + IndexOptions.NONE, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + } + + /** + * Generates the configuration required to perform aggregation for all the metrics on a field + * + * @return list of MetricAggregatorInfo + */ + public List getMetricReaders(SegmentWriteState state, Map fieldProducerMap) + throws IOException { + List metricReaders = new ArrayList<>(); + for (Metric metric : this.starTreeField.getMetrics()) { + for (MetricStat metricStat : metric.getMetrics()) { + FieldInfo metricFieldInfo = state.fieldInfos.fieldInfo(metric.getField()); + if (metricFieldInfo == null) { + metricFieldInfo = getFieldInfo(metric.getField()); + } + // TODO + // if (metricStat != MetricStat.COUNT) { + // Need not initialize the metric reader for COUNT metric type + SequentialDocValuesIterator metricReader = new SequentialDocValuesIterator( + fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) + ); + // } + + metricReaders.add(metricReader); + } + } + return metricReaders; + } + /** * Builds the star tree using Star-Tree Document * @@ -466,7 +510,6 @@ void build(Iterator starTreeDocumentIterator) throws IOExcepti // Create doc values indices in disk // Serialize and save in disk // Write star tree metadata for off heap implementation - } /** @@ -538,10 +581,10 @@ private Map constructNonStarNodes(int startDocId, int endDocId, Long nodeDimensionValue = 
getDimensionValue(startDocId, dimensionId); for (int i = startDocId + 1; i < endDocId; i++) { Long dimensionValue = getDimensionValue(i, dimensionId); - if (!dimensionValue.equals(nodeDimensionValue)) { + if (Objects.equals(dimensionValue, nodeDimensionValue) == false) { TreeNode child = getNewNode(); child.dimensionId = dimensionId; - child.dimensionValue = nodeDimensionValue; + child.dimensionValue = nodeDimensionValue != null ? nodeDimensionValue : ALL; child.startDocId = nodeStartDocId; child.endDocId = i; nodes.put(nodeDimensionValue, child); @@ -552,7 +595,7 @@ private Map constructNonStarNodes(int startDocId, int endDocId, } TreeNode lastNode = getNewNode(); lastNode.dimensionId = dimensionId; - lastNode.dimensionValue = nodeDimensionValue; + lastNode.dimensionValue = nodeDimensionValue != null ? nodeDimensionValue : ALL; lastNode.startDocId = nodeStartDocId; lastNode.endDocId = endDocId; nodes.put(nodeDimensionValue, lastNode); @@ -607,7 +650,7 @@ private StarTreeDocument createAggregatedDocs(TreeNode node) throws IOException throw new IllegalStateException("aggregated star-tree document is null after reducing the documents"); } for (int i = node.dimensionId + 1; i < numDimensions; i++) { - aggregatedStarTreeDocument.dimensions[i] = Long.valueOf(STAR_IN_DOC_VALUES_INDEX); + aggregatedStarTreeDocument.dimensions[i] = STAR_IN_DOC_VALUES_INDEX; } node.aggregatedDocId = numStarTreeDocs; appendToStarTree(aggregatedStarTreeDocument); @@ -639,7 +682,7 @@ private StarTreeDocument createAggregatedDocs(TreeNode node) throws IOException throw new IllegalStateException("aggregated star-tree document is null after reducing the documents"); } for (int i = node.dimensionId + 1; i < numDimensions; i++) { - aggregatedStarTreeDocument.dimensions[i] = Long.valueOf(STAR_IN_DOC_VALUES_INDEX); + aggregatedStarTreeDocument.dimensions[i] = STAR_IN_DOC_VALUES_INDEX; } node.aggregatedDocId = numStarTreeDocs; appendToStarTree(aggregatedStarTreeDocument); @@ -665,4 +708,5 @@ public void close() throws IOException { } + abstract Iterator mergeStarTrees(List starTreeValues) throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java index caeb24838da62..1599be2e76a56 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java @@ -7,11 +7,14 @@ */ package org.opensearch.index.compositeindex.datacube.startree.builder; -import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.search.DocIdSetIterator; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -36,27 +39,20 @@ public class OnHeapStarTreeBuilder extends BaseStarTreeBuilder { * Constructor for OnHeapStarTreeBuilder * * @param starTreeField star-tree 
field - * @param fieldProducerMap helps with document values producer for a particular field * @param segmentWriteState segment write state * @param mapperService helps with the numeric type of field - * @throws IOException throws an exception we are unable to construct an onheap star-tree */ - public OnHeapStarTreeBuilder( - StarTreeField starTreeField, - Map fieldProducerMap, - SegmentWriteState segmentWriteState, - MapperService mapperService - ) throws IOException { - super(starTreeField, fieldProducerMap, segmentWriteState, mapperService); + public OnHeapStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState segmentWriteState, MapperService mapperService) { + super(starTreeField, segmentWriteState, mapperService); } @Override - public void appendStarTreeDocument(StarTreeDocument starTreeDocument) throws IOException { + public void appendStarTreeDocument(StarTreeDocument starTreeDocument) { starTreeDocuments.add(starTreeDocument); } @Override - public StarTreeDocument getStarTreeDocument(int docId) throws IOException { + public StarTreeDocument getStarTreeDocument(int docId) { return starTreeDocuments.get(docId); } @@ -66,34 +62,123 @@ public List getStarTreeDocuments() { } @Override - public Long getDimensionValue(int docId, int dimensionId) throws IOException { + public Long getDimensionValue(int docId, int dimensionId) { return starTreeDocuments.get(docId).dimensions[dimensionId]; } + /** + * Sorts and aggregates all the documents of the segment based on dimension and metrics configuration + * + * @param dimensionReaders List of docValues readers to read dimensions from the segment + * @param metricReaders List of docValues readers to read metrics from the segment + * @return Iterator of star-tree documents + * + */ @Override - public Iterator sortAndAggregateStarTreeDocuments() throws IOException { - int numDocs = totalSegmentDocs; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[numDocs]; - for (int currentDocId = 0; currentDocId < numDocs; currentDocId++) { - starTreeDocuments[currentDocId] = getSegmentStarTreeDocument(currentDocId); + public Iterator sortAndAggregateSegmentDocuments( + SequentialDocValuesIterator[] dimensionReaders, + List metricReaders + ) throws IOException { + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[totalSegmentDocs]; + for (int currentDocId = 0; currentDocId < totalSegmentDocs; currentDocId++) { + // TODO : we can save empty iterator for dimensions which are not part of segment + starTreeDocuments[currentDocId] = getSegmentStarTreeDocument(currentDocId, dimensionReaders, metricReaders); } - return sortAndAggregateStarTreeDocuments(starTreeDocuments); } + @Override + public void build(List starTreeValuesSubs) throws IOException { + build(mergeStarTrees(starTreeValuesSubs)); + } + + /** + * Sorts and aggregates the star-tree documents from multiple segments and builds star tree based on the newly + * aggregated star-tree documents + * + * @param starTreeValuesSubs StarTreeValues from multiple segments + * @return iterator of star tree documents + */ + @Override + Iterator mergeStarTrees(List starTreeValuesSubs) throws IOException { + return sortAndAggregateStarTreeDocuments(getSegmentsStarTreeDocuments(starTreeValuesSubs), true); + } + + /** + * Returns an array of all the starTreeDocuments from all the segments + * We only take the non-star documents from all the segments. 
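An aside on the approach: both entry points above reduce to the same two steps, sort the documents by their dimension tuple, then fold adjacent runs with equal dimensions into one aggregated document. Below is a minimal, self-contained sketch of that idea, assuming a toy Doc record (Java 16+) with a single summed metric; all names here are illustrative stand-ins, not this patch's types.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

// Illustrative stand-in for a star-tree document: dimensions plus one summed metric.
record Doc(long[] dims, double metric) {}

class SortMergeSketch {
    // Order documents by their dimension tuple, position by position.
    static final Comparator<long[]> DIM_ORDER = (a, b) -> {
        for (int i = 0; i < a.length; i++) {
            int cmp = Long.compare(a[i], b[i]);
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    };

    // Sort by dimensions, then fold adjacent documents with equal dimensions
    // into one document whose metric is the sum over the run.
    static List<Doc> sortAndAggregate(List<Doc> input) {
        List<Doc> docs = new ArrayList<>(input);
        docs.sort((x, y) -> DIM_ORDER.compare(x.dims(), y.dims()));
        List<Doc> out = new ArrayList<>();
        for (Doc doc : docs) {
            int last = out.size() - 1;
            if (last >= 0 && DIM_ORDER.compare(out.get(last).dims(), doc.dims()) == 0) {
                out.set(last, new Doc(doc.dims(), out.get(last).metric() + doc.metric()));
            } else {
                out.add(doc);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        List<Doc> merged = sortAndAggregate(List.of(
            new Doc(new long[] { 3, 4 }, 10.0),
            new Doc(new long[] { 2, 4 }, 12.0),
            new Doc(new long[] { 3, 4 }, 14.0)
        ));
        merged.forEach(d -> System.out.println(Arrays.toString(d.dims()) + " -> " + d.metric()));
        // prints [2, 4] -> 12.0 and [3, 4] -> 24.0
    }
}

The builder above performs the same walk, but pulls values through SequentialDocValuesIterator readers and applies the configured per-metric aggregators rather than a plain sum.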
+ * + * @param starTreeValuesSubs StarTreeValues from multiple segments + * @return array of star tree documents + */ + StarTreeDocument[] getSegmentsStarTreeDocuments(List starTreeValuesSubs) throws IOException { + List starTreeDocuments = new ArrayList<>(); + for (StarTreeValues starTreeValues : starTreeValuesSubs) { + List dimensionsSplitOrder = starTreeValues.getStarTreeField().getDimensionsOrder(); + SequentialDocValuesIterator[] dimensionReaders = new SequentialDocValuesIterator[dimensionsSplitOrder.size()]; + + for (int i = 0; i < dimensionsSplitOrder.size(); i++) { + String dimension = dimensionsSplitOrder.get(i).getField(); + dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionDocValuesIteratorMap().get(dimension)); + } + + List metricReaders = new ArrayList<>(); + for (Map.Entry metricDocValuesEntry : starTreeValues.getMetricDocValuesIteratorMap().entrySet()) { + metricReaders.add(new SequentialDocValuesIterator(metricDocValuesEntry.getValue())); + } + + boolean endOfDoc = false; + int currentDocId = 0; + int numSegmentDocs = Integer.parseInt( + starTreeValues.getAttributes().getOrDefault(NUM_SEGMENT_DOCS, String.valueOf(DocIdSetIterator.NO_MORE_DOCS)) + ); + while (currentDocId < numSegmentDocs) { + Long[] dims = new Long[dimensionsSplitOrder.size()]; + int i = 0; + for (SequentialDocValuesIterator dimensionDocValueIterator : dimensionReaders) { + dimensionDocValueIterator.nextDoc(currentDocId); + Long val = dimensionDocValueIterator.value(currentDocId); + dims[i] = val; + i++; + } + i = 0; + Object[] metrics = new Object[metricReaders.size()]; + for (SequentialDocValuesIterator metricDocValuesIterator : metricReaders) { + metricDocValuesIterator.nextDoc(currentDocId); + // As part of merge, we traverse the star tree doc values + // The type of data stored in metric fields is different from the + // actual indexing field they're based on + metrics[i] = metricAggregatorInfos.get(i) + .getValueAggregators() + .toStarTreeNumericTypeValue(metricDocValuesIterator.value(currentDocId)); + i++; + } + StarTreeDocument starTreeDocument = new StarTreeDocument(dims, metrics); + starTreeDocuments.add(starTreeDocument); + currentDocId++; + } + } + StarTreeDocument[] starTreeDocumentsArr = new StarTreeDocument[starTreeDocuments.size()]; + return starTreeDocuments.toArray(starTreeDocumentsArr); + } + + Iterator sortAndAggregateStarTreeDocuments(StarTreeDocument[] starTreeDocuments) { + return sortAndAggregateStarTreeDocuments(starTreeDocuments, false); + } + /** * Sort, aggregates and merges the star-tree documents * * @param starTreeDocuments star-tree documents * @return iterator for star-tree documents */ - Iterator sortAndAggregateStarTreeDocuments(StarTreeDocument[] starTreeDocuments) { + Iterator sortAndAggregateStarTreeDocuments(StarTreeDocument[] starTreeDocuments, boolean isMerge) { // sort all the documents sortStarTreeDocumentsFromDimensionId(starTreeDocuments, 0); // merge the documents - return mergeStarTreeDocuments(starTreeDocuments); + return mergeStarTreeDocuments(starTreeDocuments, isMerge); } /** @@ -102,7 +187,7 @@ Iterator sortAndAggregateStarTreeDocuments(StarTreeDocument[] * @param starTreeDocuments star-tree documents * @return iterator to aggregate star-tree documents */ - private Iterator mergeStarTreeDocuments(StarTreeDocument[] starTreeDocuments) { + private Iterator mergeStarTreeDocuments(StarTreeDocument[] starTreeDocuments, boolean isMerge) { return new Iterator<>() { boolean hasNext = true; StarTreeDocument currentStarTreeDocument = 
starTreeDocuments[0]; @@ -117,7 +202,7 @@ public boolean hasNext() { @Override public StarTreeDocument next() { // aggregate as we move on to the next doc - StarTreeDocument next = reduceSegmentStarTreeDocuments(null, currentStarTreeDocument); + StarTreeDocument next = reduceSegmentStarTreeDocuments(null, currentStarTreeDocument, isMerge); while (docId < starTreeDocuments.length) { StarTreeDocument starTreeDocument = starTreeDocuments[docId]; docId++; @@ -125,7 +210,7 @@ public StarTreeDocument next() { currentStarTreeDocument = starTreeDocument; return next; } else { - next = reduceSegmentStarTreeDocuments(next, starTreeDocument); + next = reduceSegmentStarTreeDocuments(next, starTreeDocument, isMerge); } } hasNext = false; @@ -141,11 +226,9 @@ public StarTreeDocument next() { * @param endDocId End document id (exclusive) in the star-tree * @param dimensionId Dimension id of the star-node * @return iterator for star-tree documents of star-node - * @throws IOException throws when unable to generate star-tree for star-node */ @Override - public Iterator generateStarTreeDocumentsForStarNode(int startDocId, int endDocId, int dimensionId) - throws IOException { + public Iterator generateStarTreeDocumentsForStarNode(int startDocId, int endDocId, int dimensionId) { int numDocs = endDocId - startDocId; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[numDocs]; for (int i = 0; i < numDocs; i++) { @@ -177,7 +260,7 @@ public boolean hasNext() { @Override public StarTreeDocument next() { StarTreeDocument next = reduceStarTreeDocuments(null, currentStarTreeDocument); - next.dimensions[dimensionId] = Long.valueOf(STAR_IN_DOC_VALUES_INDEX); + next.dimensions[dimensionId] = STAR_IN_DOC_VALUES_INDEX; while (docId < numDocs) { StarTreeDocument starTreeDocument = starTreeDocuments[docId]; docId++; @@ -204,6 +287,15 @@ private void sortStarTreeDocumentsFromDimensionId(StarTreeDocument[] starTreeDoc Arrays.sort(starTreeDocuments, (o1, o2) -> { for (int i = dimensionId; i < numDimensions; i++) { if (!Objects.equals(o1.dimensions[i], o2.dimensions[i])) { + if (o1.dimensions[i] == null && o2.dimensions[i] == null) { + return 0; + } + if (o1.dimensions[i] == null) { + return 1; + } + if (o2.dimensions[i] == null) { + return -1; + } return Long.compare(o1.dimensions[i], o2.dimensions[i]); } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java index 20af1b3bc7935..94c9c9f2efb18 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilder.java @@ -8,10 +8,14 @@ package org.opensearch.index.compositeindex.datacube.startree.builder; +import org.apache.lucene.codecs.DocValuesProducer; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import java.io.Closeable; import java.io.IOException; +import java.util.List; +import java.util.Map; /** * A star-tree builder that builds a single star-tree. 
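The comparator at the end of this file orders documents dimension by dimension and sorts null dimension values after non-null ones; note the both-null branch is unreachable, since Objects.equals has already filtered that case. A standalone equivalent of the intended ordering, with illustrative names:

import java.util.Arrays;
import java.util.Comparator;
import java.util.Objects;

class NullsLastDims {
    // Compare Long[] dimension tuples position by position; null sorts last.
    static final Comparator<Long[]> ORDER = (a, b) -> {
        for (int i = 0; i < a.length; i++) {
            if (Objects.equals(a[i], b[i]) == false) {
                if (a[i] == null) {
                    return 1;   // null after non-null
                }
                if (b[i] == null) {
                    return -1;
                }
                return Long.compare(a[i], b[i]);
            }
        }
        return 0;
    };

    public static void main(String[] args) {
        Long[][] rows = { { 2L, null }, { 1L, 5L }, { 1L, null }, { 1L, 3L } };
        Arrays.sort(rows, ORDER);
        for (Long[] row : rows) {
            System.out.println(Arrays.toString(row));
        }
        // prints [1, 3], [1, 5], [1, null], [2, null]
    }
}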
@@ -20,10 +24,20 @@ */ @ExperimentalApi public interface StarTreeBuilder extends Closeable { + /** + * Builds the star tree from the original segment documents + * + * @param fieldProducerMap contains the docValues producer to get docValues associated with each field + * @throws IOException when we are unable to build star-tree + */ + + void build(Map fieldProducerMap) throws IOException; /** - * Builds the star tree based on star-tree field + * Builds the star tree using StarTree values from multiple segments + * + * @param starTreeValuesSubs contains the star tree values from multiple segments * @throws IOException when we are unable to build star-tree */ - void build() throws IOException; + void build(List starTreeValuesSubs) throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java deleted file mode 100644 index cb0350bb110b0..0000000000000 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapter.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.compositeindex.datacube.startree.builder; - -import org.apache.lucene.codecs.DocValuesProducer; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.DocIdSetIterator; -import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; - -import java.io.IOException; - -/** - * A factory class to return respective doc values iterator based on the doc volues type. 
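The reshaped interface carries two entry points: build(Map ...) consumes raw per-field doc values producers at flush time, while build(List ...) consumes per-segment star-tree values at merge time. A simplified model of that dual contract, using stand-in type parameters instead of the Lucene and OpenSearch types (a sketch of the shape, not the real API):

import java.util.List;
import java.util.Map;

class BuildPathsSketch {
    // Stand-in for the builder contract above: one overload per path.
    interface Builder<P, V> extends AutoCloseable {
        void build(Map<String, P> fieldProducerMap) throws Exception; // flush: raw doc values producers
        void build(List<V> starTreeValuesSubs) throws Exception;      // merge: per-segment star-tree values
    }

    static <P, V> void flush(Builder<P, V> builder, Map<String, P> producers) throws Exception {
        try (builder) {              // the builder owns resources, so close it deterministically
            builder.build(producers);
        }
    }

    static <P, V> void merge(Builder<P, V> builder, List<V> segments) throws Exception {
        try (builder) {
            builder.build(segments);
        }
    }
}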
- * - * @opensearch.experimental - */ -@ExperimentalApi -public class StarTreeDocValuesIteratorAdapter { - - /** - * Creates an iterator for the given doc values type and field using the doc values producer - */ - public SequentialDocValuesIterator getDocValuesIterator(DocValuesType type, FieldInfo field, DocValuesProducer producer) - throws IOException { - switch (type) { - case SORTED_NUMERIC: - return new SequentialDocValuesIterator(producer.getSortedNumeric(field)); - default: - throw new IllegalArgumentException("Unsupported DocValuesType: " + type); - } - } - - /** - * Returns the next value for the given iterator - */ - public Long getNextValue(SequentialDocValuesIterator sequentialDocValuesIterator, int currentDocId) throws IOException { - if (sequentialDocValuesIterator.getDocIdSetIterator() instanceof SortedNumericDocValues) { - SortedNumericDocValues sortedNumericDocValues = (SortedNumericDocValues) sequentialDocValuesIterator.getDocIdSetIterator(); - if (sequentialDocValuesIterator.getDocId() < 0 || sequentialDocValuesIterator.getDocId() == DocIdSetIterator.NO_MORE_DOCS) { - throw new IllegalStateException("invalid doc id to fetch the next value"); - } - - if (sequentialDocValuesIterator.getDocValue() == null) { - sequentialDocValuesIterator.setDocValue(sortedNumericDocValues.nextValue()); - return sequentialDocValuesIterator.getDocValue(); - } - - if (sequentialDocValuesIterator.getDocId() == currentDocId) { - Long nextValue = sequentialDocValuesIterator.getDocValue(); - sequentialDocValuesIterator.setDocValue(null); - return nextValue; - } else { - return null; - } - } else { - throw new IllegalStateException("Unsupported Iterator: " + sequentialDocValuesIterator.getDocIdSetIterator().toString()); - } - } - - /** - * Moves to the next doc in the iterator - * Returns the doc id for the next document from the given iterator - */ - public int nextDoc(SequentialDocValuesIterator iterator, int currentDocId) throws IOException { - if (iterator.getDocValue() != null) { - return iterator.getDocId(); - } - iterator.setDocId(iterator.getDocIdSetIterator().nextDoc()); - iterator.setDocValue(this.getNextValue(iterator, currentDocId)); - return iterator.getDocId(); - } - -} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java index eaf9ae1dcdaa1..6c3d476aa3a55 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java @@ -13,6 +13,7 @@ import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.SegmentWriteState; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -37,14 +38,9 @@ public class StarTreesBuilder implements Closeable { private final List starTreeFields; private final SegmentWriteState state; - private final Map fieldProducerMap; private final MapperService mapperService; - public StarTreesBuilder( - Map fieldProducerMap, - SegmentWriteState segmentWriteState, - MapperService mapperService - ) { + public StarTreesBuilder(SegmentWriteState 
segmentWriteState, MapperService mapperService) { List starTreeFields = new ArrayList<>(); for (CompositeMappedFieldType compositeMappedFieldType : mapperService.getCompositeFieldTypes()) { if (compositeMappedFieldType instanceof StarTreeMapper.StarTreeFieldType) { @@ -59,9 +55,7 @@ public StarTreesBuilder( ); } } - this.starTreeFields = starTreeFields; - this.fieldProducerMap = fieldProducerMap; this.state = segmentWriteState; this.mapperService = mapperService; } @@ -69,38 +63,67 @@ public StarTreesBuilder( /** * Builds the star-trees. */ - public void build() throws IOException { + public void build(Map fieldProducerMap) throws IOException { if (starTreeFields.isEmpty()) { logger.debug("no star-tree fields found, returning from star-tree builder"); return; } long startTime = System.currentTimeMillis(); + int numStarTrees = starTreeFields.size(); logger.debug("Starting building {} star-trees with star-tree fields", numStarTrees); // Build all star-trees for (StarTreeField starTreeField : starTreeFields) { - try (StarTreeBuilder starTreeBuilder = getStarTreeBuilder(starTreeField, fieldProducerMap, state, mapperService)) { - starTreeBuilder.build(); + try (StarTreeBuilder starTreeBuilder = getSingleTreeBuilder(starTreeField, state, mapperService)) { + starTreeBuilder.build(fieldProducerMap); } } - logger.debug("Took {} ms to building {} star-trees with star-tree fields", System.currentTimeMillis() - startTime, numStarTrees); + logger.debug("Took {} ms to build {} star-trees with star-tree fields", System.currentTimeMillis() - startTime, numStarTrees); } @Override public void close() throws IOException { + // TODO : close files + } + /** + * Merges star tree fields from multiple segments + * + * @param starTreeValuesSubsPerField starTreeValuesSubs per field + */ + public void buildDuringMerge(final Map> starTreeValuesSubsPerField) throws IOException { + logger.debug("Starting merge of {} star-trees with star-tree fields", starTreeValuesSubsPerField.size()); + long startTime = System.currentTimeMillis(); + for (Map.Entry> entry : starTreeValuesSubsPerField.entrySet()) { + List starTreeValuesList = entry.getValue(); + if (starTreeValuesList.isEmpty()) { + logger.debug("StarTreeValues is empty for all segments for field : {}", entry.getKey()); + continue; + } + StarTreeField starTreeField = starTreeValuesList.get(0).getStarTreeField(); + StarTreeBuilder builder = getSingleTreeBuilder(starTreeField, state, mapperService); + builder.build(starTreeValuesList); + builder.close(); + } + logger.debug( + "Took {} ms to merge {} star-trees with star-tree fields", + System.currentTimeMillis() - startTime, + starTreeValuesSubsPerField.size() + ); } - StarTreeBuilder getStarTreeBuilder( - StarTreeField starTreeField, - Map fieldProducerMap, - SegmentWriteState state, - MapperService mapperService - ) throws IOException { + /** + * Get star-tree builder based on build mode. 
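buildDuringMerge above closes each per-field builder explicitly after build returns. A hedged alternative, sketched here with simplified stand-in types, is the same loop in try-with-resources form so the builder is also released if build throws part-way through a merge; whether that matters in practice depends on what close() eventually releases, since the patch currently leaves close() as a TODO.

import java.util.List;
import java.util.Map;
import java.util.function.Function;

class MergeLoopSketch {
    // Stand-in for the per-field star-tree builder; close() releases its resources.
    interface TreeBuilder extends AutoCloseable {
        void build(List<Object> starTreeValuesPerSegment) throws Exception;
    }

    // One builder per composite field; try-with-resources guarantees close()
    // even when build(...) throws mid-merge.
    static void buildDuringMerge(Map<String, List<Object>> perField,
                                 Function<String, TreeBuilder> builderFactory) throws Exception {
        for (Map.Entry<String, List<Object>> entry : perField.entrySet()) {
            if (entry.getValue().isEmpty()) {
                continue; // nothing to merge for this field
            }
            try (TreeBuilder builder = builderFactory.apply(entry.getKey())) {
                builder.build(entry.getValue());
            }
        }
    }
}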
+ */ + StarTreeBuilder getSingleTreeBuilder(StarTreeField starTreeField, SegmentWriteState state, MapperService mapperService) + throws IOException { switch (starTreeField.getStarTreeConfig().getBuildMode()) { case ON_HEAP: - return new OnHeapStarTreeBuilder(starTreeField, fieldProducerMap, state, mapperService); + return new OnHeapStarTreeBuilder(starTreeField, state, mapperService); + case OFF_HEAP: + // TODO + // return new OffHeapStarTreeBuilder(starTreeField, state, mapperService); default: throw new IllegalArgumentException( String.format( diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java new file mode 100644 index 0000000000000..59522ffa4be89 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java @@ -0,0 +1,112 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.node; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; +import java.util.Iterator; + +/** + * Interface that represents star tree node + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface StarTreeNode { + long ALL = -1l; + + /** + * Returns the dimension ID of the current star-tree node. + * + * @return the dimension ID + * @throws IOException if an I/O error occurs while reading the dimension ID + */ + int getDimensionId() throws IOException; + + /** + * Returns the dimension value of the current star-tree node. + * + * @return the dimension value + * @throws IOException if an I/O error occurs while reading the dimension value + */ + long getDimensionValue() throws IOException; + + /** + * Returns the dimension ID of the child star-tree node. + * + * @return the child dimension ID + * @throws IOException if an I/O error occurs while reading the child dimension ID + */ + int getChildDimensionId() throws IOException; + + /** + * Returns the start document ID of the current star-tree node. + * + * @return the start document ID + * @throws IOException if an I/O error occurs while reading the start document ID + */ + int getStartDocId() throws IOException; + + /** + * Returns the end document ID of the current star-tree node. + * + * @return the end document ID + * @throws IOException if an I/O error occurs while reading the end document ID + */ + int getEndDocId() throws IOException; + + /** + * Returns the aggregated document ID of the current star-tree node. + * + * @return the aggregated document ID + * @throws IOException if an I/O error occurs while reading the aggregated document ID + */ + int getAggregatedDocId() throws IOException; + + /** + * Returns the number of children of the current star-tree node. + * + * @return the number of children + * @throws IOException if an I/O error occurs while reading the number of children + */ + int getNumChildren() throws IOException; + + /** + * Checks if the current node is a leaf star-tree node. + * + * @return true if the node is a leaf node, false otherwise + */ + boolean isLeaf(); + + /** + * Checks if the current node is a star node. 
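Taken together, the accessors on this interface are enough for a depth-first walk of a built tree. A sketch against a trimmed-down slice of the contract (a local interface stripped of the IOException signatures purely for brevity; the authoritative contract is the interface defined here):

import java.util.Iterator;

class TreeWalkSketch {
    // Minimal slice of the node contract needed for traversal.
    interface Node {
        long getDimensionValue();
        boolean isLeaf();
        boolean isStarNode();
        Iterator<? extends Node> getChildrenIterator();
    }

    // Depth-first walk printing one node per line; star nodes are marked '*'.
    static void walk(Node node, int depth) {
        String label = node.isStarNode() ? "*" : String.valueOf(node.getDimensionValue());
        System.out.println("  ".repeat(depth) + label);
        if (!node.isLeaf()) {
            Iterator<? extends Node> children = node.getChildrenIterator();
            while (children.hasNext()) {
                walk(children.next(), depth + 1);
            }
        }
    }
}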
+ * + * @return true if the node is a star node, false otherwise + * @throws IOException if an I/O error occurs while reading the star node status + */ + boolean isStarNode() throws IOException; + + /** + * Returns the child star-tree node for the given dimension value. + * + * @param dimensionValue the dimension value + * @return the child node for the given dimension value or null if child is not present + * @throws IOException if an I/O error occurs while retrieving the child node + */ + StarTreeNode getChildForDimensionValue(long dimensionValue) throws IOException; + + /** + * Returns an iterator over the children of the current star-tree node. + * + * @return an iterator over the children + * @throws IOException if an I/O error occurs while retrieving the children iterator + */ + Iterator getChildrenIterator() throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/package-info.java new file mode 100644 index 0000000000000..516d5b5a012ab --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Holds classes associated with star tree node + */ +package org.opensearch.index.compositeindex.datacube.startree.node; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java index cf5f3e94c1ca6..400d7a1c00104 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java @@ -1,3 +1,4 @@ + /* * SPDX-License-Identifier: Apache-2.0 * @@ -17,7 +18,6 @@ /** * Coordinates the reading of documents across multiple DocIdSetIterators. * It encapsulates a single DocIdSetIterator and maintains the latest document ID and its associated value. - * * @opensearch.experimental */ @ExperimentalApi @@ -28,15 +28,10 @@ public class SequentialDocValuesIterator { */ private final DocIdSetIterator docIdSetIterator; - /** - * The value associated with the latest document. - */ - private Long docValue; - /** * The id of the latest document. */ - private int docId; + private int docId = -1; /** * Constructs a new SequentialDocValuesIterator instance with the given DocIdSetIterator. @@ -47,85 +42,15 @@ public SequentialDocValuesIterator(DocIdSetIterator docIdSetIterator) { this.docIdSetIterator = docIdSetIterator; } - /** - * Constructs a new SequentialDocValuesIterator instance with the given SortedNumericDocValues. 
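Further down in this file the class gains a forward-only contract: nextDoc(int) advances the underlying iterator at most one step toward the requested doc, and value(int) returns null when the current doc has no entry for the field. A hypothetical consumption pattern under that contract, written against an illustrative local interface rather than the real class, assuming docs are requested in increasing order:

import java.io.IOException;

class ReadLoopSketch {
    // Illustrative slice of the forward-only contract described above.
    interface ForwardDocValuesIterator {
        int nextDoc(int targetDocId) throws IOException;
        Long value(int targetDocId) throws IOException;
    }

    // Read one column of values for docs [0, maxDoc); a null entry means the
    // doc has no value for this field.
    static Long[] readColumn(ForwardDocValuesIterator it, int maxDoc) throws IOException {
        Long[] column = new Long[maxDoc];
        for (int docId = 0; docId < maxDoc; docId++) {
            it.nextDoc(docId);               // no-op when the iterator is already at or past docId
            column[docId] = it.value(docId); // null when docId has no value
        }
        return column;
    }
}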
- * - */ - public SequentialDocValuesIterator() { - this.docIdSetIterator = new SortedNumericDocValues() { - @Override - public long nextValue() throws IOException { - return 0; - } - - @Override - public int docValueCount() { - return 0; - } - - @Override - public boolean advanceExact(int i) throws IOException { - return false; - } - - @Override - public int docID() { - return 0; - } - - @Override - public int nextDoc() throws IOException { - return 0; - } - - @Override - public int advance(int i) throws IOException { - return 0; - } - - @Override - public long cost() { - return 0; - } - }; - } - - /** - * Returns the value associated with the latest document. - * - * @return the value associated with the latest document - */ - public Long getDocValue() { - return docValue; - } - - /** - * Sets the value associated with the latest document. - * - * @param docValue the value to be associated with the latest document - */ - public void setDocValue(Long docValue) { - this.docValue = docValue; - } - /** * Returns the id of the latest document. * * @return the id of the latest document */ - public int getDocId() { + int getDocId() { return docId; } - /** - * Sets the id of the latest document. - * - * @param docId the ID of the latest document - */ - public void setDocId(int docId) { - this.docId = docId; - } - /** * Returns the DocIdSetIterator associated with this instance. * @@ -134,4 +59,32 @@ public void setDocId(int docId) { public DocIdSetIterator getDocIdSetIterator() { return docIdSetIterator; } + + public int nextDoc(int currentDocId) throws IOException { + // if the stored doc id is greater than or equal to the requested doc id, return the stored doc id + if (docId >= currentDocId) { + return docId; + } + docId = this.docIdSetIterator.nextDoc(); + return docId; + } + + public Long value(int currentDocId) throws IOException { + if (this.getDocIdSetIterator() instanceof SortedNumericDocValues) { + SortedNumericDocValues sortedNumericDocValues = (SortedNumericDocValues) this.getDocIdSetIterator(); + if (currentDocId < 0) { + throw new IllegalStateException("invalid doc id to fetch the next value"); + } + if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) { + throw new IllegalStateException("DocValuesIterator is already exhausted"); + } + if (docId == DocIdSetIterator.NO_MORE_DOCS || docId != currentDocId) { + return null; + } + return sortedNumericDocValues.nextValue(); + + } else { + throw new IllegalStateException("Unsupported Iterator requested for SequentialDocValuesIterator"); + } + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java index e067e70621304..7239ddfb26c0d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompositeMappedFieldType.java @@ -72,6 +72,10 @@ public static CompositeFieldType fromName(String name) { } } + public CompositeFieldType getCompositeIndexType() { + return type; + } + public List fields() { return fields; } diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java index 31df9a49bebfb..049d91bc42d9c 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java +++
b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -12,63 +12,165 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.BaseDocValuesFormatTestCase; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; -import org.opensearch.common.Rounding; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.MapperTestUtils; import org.opensearch.index.codec.composite.Composite99Codec; -import org.opensearch.index.compositeindex.datacube.DateDimension; -import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.Metric; -import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.NumericDimension; -import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; -import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.StarTreeMapper; +import org.opensearch.indices.IndicesModule; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; -import java.util.ArrayList; +import java.io.IOException; import java.util.Collections; -import java.util.List; -import java.util.Set; -import org.mockito.Mockito; +import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; /** * Star tree doc values Lucene tests */ @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") public class StarTreeDocValuesFormatTests extends BaseDocValuesFormatTestCase { + MapperService mapperService = null; + + @BeforeClass + public static void createMapper() throws Exception { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(STAR_TREE_INDEX, "true").build()); + } + + @AfterClass + public static void clearMapper() { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + @After + public void teardown() throws IOException { + mapperService.close(); + } + @Override protected Codec getCodec() { - MapperService service = Mockito.mock(MapperService.class); - Mockito.when(service.getCompositeFieldTypes()).thenReturn(Set.of(getStarTreeFieldType())); final Logger testLogger = LogManager.getLogger(StarTreeDocValuesFormatTests.class); - return new Composite99Codec(Lucene99Codec.Mode.BEST_SPEED, service, testLogger); + + try { + createMapperService(getExpandedMapping("status", "size")); + } catch (IOException e) { + throw new RuntimeException(e); + } + Codec codec = new Composite99Codec(Lucene99Codec.Mode.BEST_SPEED, mapperService, testLogger); + return codec; } - private StarTreeMapper.StarTreeFieldType getStarTreeFieldType() { - List m1 = new ArrayList<>(); - m1.add(MetricStat.MAX); - 
Metric metric = new Metric("sndv", m1); - List d1CalendarIntervals = new ArrayList<>(); - d1CalendarIntervals.add(Rounding.DateTimeUnit.HOUR_OF_DAY); - StarTreeField starTreeField = getStarTreeField(d1CalendarIntervals, metric); + public void testStarTreeDocValues() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedNumericDocValuesField("dv", 1)); + doc.add(new SortedNumericDocValuesField("field", 1)); + iw.addDocument(doc); + doc.add(new SortedNumericDocValuesField("sndv", 1)); + doc.add(new SortedNumericDocValuesField("dv", 1)); + doc.add(new SortedNumericDocValuesField("field", 1)); + iw.addDocument(doc); + iw.forceMerge(1); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedNumericDocValuesField("dv", 2)); + doc.add(new SortedNumericDocValuesField("field", 2)); + iw.addDocument(doc); + doc.add(new SortedNumericDocValuesField("sndv", 2)); + doc.add(new SortedNumericDocValuesField("dv", 2)); + doc.add(new SortedNumericDocValuesField("field", 2)); + iw.addDocument(doc); + iw.forceMerge(1); + iw.close(); + + // TODO : validate star tree structures that got created + directory.close(); + } - return new StarTreeMapper.StarTreeFieldType("star_tree", starTreeField); + private XContentBuilder getExpandedMapping(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "sndv"); + b.endObject(); + b.startObject(); + b.field("name", "dv"); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", "field"); + b.startArray("stats"); + b.value("sum"); + b.value("count"); // TODO : THIS TEST FAILS. 
+ b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("sndv"); + b.field("type", "integer"); + b.endObject(); + b.startObject("dv"); + b.field("type", "integer"); + b.endObject(); + b.startObject("field"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); } - private static StarTreeField getStarTreeField(List d1CalendarIntervals, Metric metric1) { - DateDimension d1 = new DateDimension("field", d1CalendarIntervals); - NumericDimension d2 = new NumericDimension("dv"); + private XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc"); + buildFields.accept(builder); + return builder.endObject().endObject(); + } - List metrics = List.of(metric1); - List dims = List.of(d1, d2); - StarTreeFieldConfiguration config = new StarTreeFieldConfiguration( - 100, - Collections.emptySet(), - StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP + private void createMapperService(XContentBuilder builder) throws IOException { + IndexMetadata indexMetadata = IndexMetadata.builder("test") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .putMapping(builder.toString()) + .build(); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); + mapperService = MapperTestUtils.newMapperServiceWithHelperAnalyzer( + new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), + createTempDir(), + Settings.EMPTY, + indicesModule, + "test" ); - - return new StarTreeField("starTree", dims, metrics, config); + mapperService.merge(indexMetadata, MapperService.MergeReason.INDEX_TEMPLATE); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java index e30e203406a6c..8e6e9e9974646 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java @@ -13,7 +13,7 @@ import org.opensearch.test.OpenSearchTestCase; public class CountValueAggregatorTests extends OpenSearchTestCase { - private final CountValueAggregator aggregator = new CountValueAggregator(); + private final CountValueAggregator aggregator = new CountValueAggregator(StarTreeNumericType.LONG); public void testGetAggregationType() { assertEquals(MetricStat.COUNT.getTypeName(), aggregator.getAggregationType().getTypeName()); @@ -24,11 +24,11 @@ public void testGetAggregatedValueType() { } public void testGetInitialAggregatedValueForSegmentDocValue() { - assertEquals(1L, aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong(), StarTreeNumericType.LONG), 0.0); + assertEquals(1L, aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong()), 0.0); } public void testMergeAggregatedValueAndSegmentValue() { - assertEquals(3L, aggregator.mergeAggregatedValueAndSegmentValue(2L, 3L, StarTreeNumericType.LONG), 0.0); + assertEquals(3L, aggregator.mergeAggregatedValueAndSegmentValue(2L, 3L), 0.0); } public void testMergeAggregatedValues() { @@ -48,6 +48,6 @@ public void testToLongValue() 
{ } public void testToStarTreeNumericTypeValue() { - assertEquals(3L, aggregator.toStarTreeNumericTypeValue(3L, StarTreeNumericType.LONG), 0.0); + assertEquals(3L, aggregator.toStarTreeNumericTypeValue(3L), 0.0); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java index d08f637a3f0a9..73e6aeb44cfd7 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/MetricAggregatorInfoTests.java @@ -19,8 +19,7 @@ public void testConstructor() { MetricStat.SUM, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); assertEquals(MetricStat.SUM, pair.getMetricStat()); assertEquals("column1", pair.getField()); @@ -31,8 +30,7 @@ public void testCountStarConstructor() { MetricStat.COUNT, "anything", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); assertEquals(MetricStat.COUNT, pair.getMetricStat()); assertEquals("anything", pair.getField()); @@ -43,8 +41,7 @@ public void testToFieldName() { MetricStat.SUM, "column2", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); assertEquals("star_tree_field_column2_sum", pair.toFieldName()); } @@ -54,24 +51,22 @@ public void testEquals() { MetricStat.SUM, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); MetricAggregatorInfo pair2 = new MetricAggregatorInfo( MetricStat.SUM, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); assertEquals(pair1, pair2); assertNotEquals( pair1, - new MetricAggregatorInfo(MetricStat.COUNT, "column1", "star_tree_field", IndexNumericFieldData.NumericType.DOUBLE, null) + new MetricAggregatorInfo(MetricStat.COUNT, "column1", "star_tree_field", IndexNumericFieldData.NumericType.DOUBLE) ); assertNotEquals( pair1, - new MetricAggregatorInfo(MetricStat.SUM, "column2", "star_tree_field", IndexNumericFieldData.NumericType.DOUBLE, null) + new MetricAggregatorInfo(MetricStat.SUM, "column2", "star_tree_field", IndexNumericFieldData.NumericType.DOUBLE) ); } @@ -80,15 +75,13 @@ public void testHashCode() { MetricStat.SUM, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); MetricAggregatorInfo pair2 = new MetricAggregatorInfo( MetricStat.SUM, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); assertEquals(pair1.hashCode(), pair2.hashCode()); } @@ -98,22 +91,19 @@ public void testCompareTo() { MetricStat.SUM, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); MetricAggregatorInfo pair2 = new MetricAggregatorInfo( MetricStat.SUM, "column2", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + IndexNumericFieldData.NumericType.DOUBLE ); MetricAggregatorInfo pair3 = new MetricAggregatorInfo( MetricStat.COUNT, "column1", "star_tree_field", - IndexNumericFieldData.NumericType.DOUBLE, - null + 
IndexNumericFieldData.NumericType.DOUBLE ); assertTrue(pair1.compareTo(pair2) < 0); assertTrue(pair2.compareTo(pair1) > 0); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java index 3fb627e7cd434..dd66d4344c9e8 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregatorTests.java @@ -20,7 +20,7 @@ public class SumValueAggregatorTests extends OpenSearchTestCase { @Before public void setup() { - aggregator = new SumValueAggregator(); + aggregator = new SumValueAggregator(StarTreeNumericType.LONG); } public void testGetAggregationType() { @@ -32,21 +32,18 @@ public void testGetAggregatedValueType() { } public void testGetInitialAggregatedValueForSegmentDocValue() { - assertEquals(1.0, aggregator.getInitialAggregatedValueForSegmentDocValue(1L, StarTreeNumericType.LONG), 0.0); - assertThrows( - NullPointerException.class, - () -> aggregator.getInitialAggregatedValueForSegmentDocValue(null, StarTreeNumericType.DOUBLE) - ); + assertEquals(1.0, aggregator.getInitialAggregatedValueForSegmentDocValue(1L), 0.0); + assertThrows(NullPointerException.class, () -> aggregator.getInitialAggregatedValueForSegmentDocValue(null)); } public void testMergeAggregatedValueAndSegmentValue() { aggregator.getInitialAggregatedValue(2.0); - assertEquals(5.0, aggregator.mergeAggregatedValueAndSegmentValue(2.0, 3L, StarTreeNumericType.LONG), 0.0); + assertEquals(5.0, aggregator.mergeAggregatedValueAndSegmentValue(2.0, 3L), 0.0); } public void testMergeAggregatedValueAndSegmentValue_nullSegmentDocValue() { aggregator.getInitialAggregatedValue(2.0); - assertThrows(NullPointerException.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(2.0, null, StarTreeNumericType.LONG)); + assertThrows(NullPointerException.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(2.0, null)); } public void testMergeAggregatedValues() { @@ -67,6 +64,6 @@ public void testToLongValue() { } public void testToStarTreeNumericTypeValue() { - assertEquals(NumericUtils.sortableLongToDouble(3L), aggregator.toStarTreeNumericTypeValue(3L, StarTreeNumericType.DOUBLE), 0.0); + assertEquals(NumericUtils.sortableLongToDouble(3L), aggregator.toStarTreeNumericTypeValue(3L), 0.0); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java index ce61ab839cc61..428668511fb2e 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactoryTests.java @@ -15,7 +15,7 @@ public class ValueAggregatorFactoryTests extends OpenSearchTestCase { public void testGetValueAggregatorForSumType() { - ValueAggregator aggregator = ValueAggregatorFactory.getValueAggregator(MetricStat.SUM); + ValueAggregator aggregator = ValueAggregatorFactory.getValueAggregator(MetricStat.SUM, StarTreeNumericType.LONG); assertNotNull(aggregator); assertEquals(SumValueAggregator.class, aggregator.getClass()); } diff --git 
a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java new file mode 100644 index 0000000000000..76a7875919a8b --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java @@ -0,0 +1,2251 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.EmptyDocValuesProducer; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.Version; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode; +import org.opensearch.index.mapper.ContentPath; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MappingLookup; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Queue; +import java.util.Set; +import java.util.UUID; + +import static org.opensearch.index.compositeindex.datacube.startree.builder.BaseStarTreeBuilder.NUM_SEGMENT_DOCS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public abstract class AbstractStarTreeBuilderTests extends OpenSearchTestCase { + protected MapperService 
mapperService; + protected List dimensionsOrder; + protected List fields = List.of(); + protected List metrics; + protected Directory directory; + protected FieldInfo[] fieldsInfo; + protected StarTreeField compositeField; + protected Map fieldProducerMap; + protected SegmentWriteState writeState; + private BaseStarTreeBuilder builder; + + @Before + public void setup() throws IOException { + fields = List.of("field1", "field2", "field3", "field4", "field5", "field6", "field7", "field8", "field9", "field10"); + + dimensionsOrder = List.of( + new NumericDimension("field1"), + new NumericDimension("field3"), + new NumericDimension("field5"), + new NumericDimension("field8") + ); + metrics = List.of( + new Metric("field2", List.of(MetricStat.SUM)), + new Metric("field4", List.of(MetricStat.SUM)), + new Metric("field6", List.of(MetricStat.COUNT)) + ); + + DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); + + compositeField = new StarTreeField( + "test", + dimensionsOrder, + metrics, + new StarTreeFieldConfiguration(1, Set.of("field8"), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) + ); + directory = newFSDirectory(createTempDir()); + + fieldsInfo = new FieldInfo[fields.size()]; + fieldProducerMap = new HashMap<>(); + for (int i = 0; i < fieldsInfo.length; i++) { + fieldsInfo[i] = new FieldInfo( + fields.get(i), + i, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + fieldProducerMap.put(fields.get(i), docValuesProducer); + } + writeState = getWriteState(5); + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + } + + private SegmentWriteState getWriteState(int numDocs) { + FieldInfos fieldInfos = new FieldInfos(fieldsInfo); + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + numDocs, + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + return new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); + } + + public abstract BaseStarTreeBuilder getStarTreeBuilder( + StarTreeField starTreeField, + SegmentWriteState segmentWriteState, + MapperService mapperService + ) throws 
IOException; + + public void test_sortAndAggregateStarTreeDocuments() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + + int numOfAggregatedDocuments = 0; + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + + numOfAggregatedDocuments++; + } + + assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + } + + SequentialDocValuesIterator[] getDimensionIterators(StarTreeDocument[] starTreeDocuments) { + SequentialDocValuesIterator[] sequentialDocValuesIterators = + new SequentialDocValuesIterator[starTreeDocuments[0].dimensions.length]; + for (int j = 0; j < starTreeDocuments[0].dimensions.length; j++) { + List dimList = new ArrayList<>(); + List docsWithField = new ArrayList<>(); + + for (int i = 0; i < 
starTreeDocuments.length; i++) { + if (starTreeDocuments[i].dimensions[j] != null) { + dimList.add(starTreeDocuments[i].dimensions[j]); + docsWithField.add(i); + } + } + sequentialDocValuesIterators[j] = new SequentialDocValuesIterator(getSortedNumericMock(dimList, docsWithField)); + } + return sequentialDocValuesIterators; + } + + List getMetricIterators(StarTreeDocument[] starTreeDocuments) { + List sequentialDocValuesIterators = new ArrayList<>(); + for (int j = 0; j < starTreeDocuments[0].metrics.length; j++) { + List metricslist = new ArrayList<>(); + List docsWithField = new ArrayList<>(); + + for (int i = 0; i < starTreeDocuments.length; i++) { + if (starTreeDocuments[i].metrics[j] != null) { + metricslist.add((long) starTreeDocuments[i].metrics[j]); + docsWithField.add(i); + } + } + sequentialDocValuesIterators.add(new SequentialDocValuesIterator(getSortedNumericMock(metricslist, docsWithField))); + } + return sequentialDocValuesIterators; + } + + public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + Long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + Long metric2 = starTreeDocuments[i].metrics[1] != null + ? 
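+                // doubleToSortableLong is Lucene's order-preserving double-to-long encoding; the builder is
+                // expected to decode it back before aggregating, which is why the expected sums above are
+                // plain doubles (21.0/14.0 and 35.0/18.0, the null metric simply being skipped by the sum).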
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]) + : null; + Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 }); + } + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + } + } + + public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + // Setting second metric iterator as empty sorted numeric , indicating a metric field is null + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, null, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, null, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + Long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + Long metric2 = starTreeDocuments[i].metrics[1] != null + ? 
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]) + : null; + Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 }); + } + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + } + } + + public void test_sortAndAggregateStarTreeDocuments_nullDimensionField() throws IOException { + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + // Setting second metric iterator as empty sorted numeric , indicating a metric field is null + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 12.0, null, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 9.0, null, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 21.0, 0.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + Long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + Long metric2 = starTreeDocuments[i].metrics[1] != null + ? 
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1])
+                : null;
+            Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]);
+            segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 });
+        }
+        SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments);
+        List<SequentialDocValuesIterator> metricsIterators = getMetricIterators(segmentStarTreeDocuments);
+        builder = getStarTreeBuilder(compositeField, writeState, mapperService);
+        Iterator<StarTreeDocument> segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments(
+            dimsIterators,
+            metricsIterators
+        );
+
+        while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) {
+            StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next();
+            StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next();
+            assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]);
+            assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]);
+            assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]);
+            assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]);
+            assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]);
+            assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]);
+            assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]);
+        }
+    }
+
+    public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics() throws IOException {
+        int noOfStarTreeDocuments = 5;
+        StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
+        // Every dimension and every metric is null for all documents, so no aggregated documents are expected
+        starTreeDocuments[0] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null });
+        starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null });
+        starTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null });
+        starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null });
+        starTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null });
+
+        List<StarTreeDocument> inorderStarTreeDocuments = List.of();
+        Iterator<StarTreeDocument> expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator();
+
+        StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments];
+        for (int i = 0; i < noOfStarTreeDocuments; i++) {
+            Long metric1 = starTreeDocuments[i].metrics[0] != null
+                ? NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0])
+                : null;
+            Long metric2 = starTreeDocuments[i].metrics[1] != null
+                ? NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1])
+                : null;
+            Long metric3 = starTreeDocuments[i].metrics[2] != null
+                ? 
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]) + : null; + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 }); + } + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + } + } + + public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + // Setting second metric iterator as empty sorted numeric , indicating a metric field is null + starTreeDocuments[0] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { 12.0, null, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { 10.0, null, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { 14.0, null, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { 9.0, null, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { 11.0, null, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + Long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + Long metric2 = starTreeDocuments[i].metrics[1] != null + ? 
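+                // With every dimension null, all five documents collapse into one bucket, so the single
+                // expected document above carries sum 12.0 + 10.0 + 14.0 + 9.0 + 11.0 = 56.0, a 0.0 default
+                // for the always-null second metric, and a count of 5.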
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]) + : null; + Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 }); + } + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + } + } + + public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 11.0, 16.0, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = 
getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + int numOfAggregatedDocuments = 0; + while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + + numOfAggregatedDocuments++; + } + + assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + + } + + public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { Double.MAX_VALUE, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, Double.MIN_VALUE, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); + + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L }) + ); + Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + int numOfAggregatedDocuments = 0; + while (segmentStarTreeDocumentIterator.hasNext() && 
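+        // Double.MIN_VALUE is the smallest positive double (~4.9e-324), not the most negative one, so the
+        // expected sums Double.MAX_VALUE + 9 and Double.MIN_VALUE + 22 probe precision at both extremes
+        // rather than overflow; in IEEE-754 arithmetic Double.MAX_VALUE + 9 rounds back to Double.MAX_VALUE.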
expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + + numOfAggregatedDocuments++; + } + + assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + + } + + public void test_build_halfFloatMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf1", 12), new HalfFloatPoint("hf6", 10), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf2", 10), new HalfFloatPoint("hf7", 6), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf3", 14), new HalfFloatPoint("hf8", 12), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf4", 9), new HalfFloatPoint("hf9", 4), new HalfFloatPoint("field6", 10) } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new HalfFloatPoint[] { new HalfFloatPoint("hf5", 11), new HalfFloatPoint("hf10", 16), new HalfFloatPoint("field6", 10) } + ); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = HalfFloatPoint.halfFloatToSortableShort( + 
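+            // halfFloatToSortableShort is the half-float analogue of doubleToSortableLong: an
+            // order-preserving mapping of the 16-bit value onto a sortable short, widened to long here so
+            // every numeric type funnels into the same Long[] metrics representation.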
((HalfFloatPoint) starTreeDocuments[i].metrics[0]).numericValue().floatValue() + ); + long metric2 = HalfFloatPoint.halfFloatToSortableShort( + ((HalfFloatPoint) starTreeDocuments[i].metrics[1]).numericValue().floatValue() + ); + long metric3 = HalfFloatPoint.halfFloatToSortableShort( + ((HalfFloatPoint) starTreeDocuments[i].metrics[2]).numericValue().floatValue() + ); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + builder.build(segmentStarTreeDocumentIterator); + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + public void test_build_floatMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.FLOAT, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 12.0F, 10.0F, randomFloat() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 10.0F, 6.0F, randomFloat() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 14.0F, 12.0F, randomFloat() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 9.0F, 4.0F, randomFloat() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 11.0F, 16.0F, randomFloat() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.floatToSortableInt((Float) 
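+            // floatToSortableInt is the 32-bit counterpart of doubleToSortableLong; the sortable int is
+            // widened to long so float metrics flow through the same Long-based pipeline as doubles.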
starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + builder.build(segmentStarTreeDocumentIterator); + + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + public void test_build_longMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.LONG, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.LONG, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.LONG, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 12L, 10L, randomLong() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 10L, 6L, randomLong() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 14L, 12L, randomLong() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 9L, 4L, randomLong() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 11L, 16L, randomLong() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = (Long) starTreeDocuments[i].metrics[0]; + long metric2 = (Long) starTreeDocuments[i].metrics[1]; + long metric3 = (Long) starTreeDocuments[i].metrics[2]; + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + 
dimsIterators, + metricsIterators + ); + builder.build(segmentStarTreeDocumentIterator); + + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + private static Iterator getExpectedStarTreeDocumentIterator() { + List expectedStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L }), + new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L }), + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 48.0, 5L }) + ); + return expectedStarTreeDocuments.iterator(); + } + + public void test_build() throws IOException { + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); + long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + builder.build(segmentStarTreeDocumentIterator); + + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + } + + private void assertStarTreeDocuments( + List resultStarTreeDocuments, + Iterator expectedStarTreeDocumentIterator + ) { + Iterator resultStarTreeDocumentIterator = resultStarTreeDocuments.iterator(); + 
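// This helper zips the two iterators, so comparison stops with the shorter side: the expected list above
+        // holds 8 documents while the build tests assert 7 results, leaving the trailing
+        // [null, null, null, null] grand-total document uncompared here.
+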
while (resultStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = resultStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); + assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); + } + } + + public void test_build_starTreeDataset() throws IOException { + + fields = List.of("fieldC", "fieldB", "fieldL", "fieldI"); + + dimensionsOrder = List.of(new NumericDimension("fieldC"), new NumericDimension("fieldB"), new NumericDimension("fieldL")); + metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM))); + + DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); + + compositeField = new StarTreeField( + "test", + dimensionsOrder, + metrics, + new StarTreeFieldConfiguration(1, Set.of(), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) + ); + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + 7, + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + fieldsInfo = new FieldInfo[fields.size()]; + fieldProducerMap = new HashMap<>(); + for (int i = 0; i < fieldsInfo.length; i++) { + fieldsInfo[i] = new FieldInfo( + fields.get(i), + i, + false, + false, + true, + IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, + DocValuesType.SORTED_NUMERIC, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + fieldProducerMap.put(fields.get(i), docValuesProducer); + } + FieldInfos fieldInfos = new FieldInfos(fieldsInfo); + writeState = new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("fieldI", NumberFieldMapper.NumberType.DOUBLE, false, true) + .build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + + int noOfStarTreeDocuments = 7; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Double[] { 400.0 }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Double[] { 200.0 }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 
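+        // Per-dimension sums behind getExpectedDimToValueMap(): fieldC 1 -> 400+200=600, 2 -> 300+100=400,
+        // 3 -> 600+200+400=1200; fieldB 11 -> 400+600=1000, 12 -> 200+200+400=800, 13 -> 300+100=400;
+        // fieldL 21 -> 400+100+600+400=1500, 22 -> 200, 23 -> 300+200=500.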
2L, 13L, 23L }, new Double[] { 300.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Double[] { 100.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Double[] { 600.0 }); + starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Double[] { 200.0 }); + starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Double[] { 400.0 }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1 }); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + builder = getStarTreeBuilder(compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + builder.build(segmentStarTreeDocumentIterator); + + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + Iterator expectedStarTreeDocumentIterator = expectedStarTreeDocuments(); + Iterator resultStarTreeDocumentIterator = resultStarTreeDocuments.iterator(); + Map> dimValueToDocIdMap = new HashMap<>(); + builder.rootNode.isStarNode = true; + traverseStarTree(builder.rootNode, dimValueToDocIdMap, true); + + Map> expectedDimToValueMap = getExpectedDimToValueMap(); + for (Map.Entry> entry : dimValueToDocIdMap.entrySet()) { + int dimId = entry.getKey(); + if (dimId == -1) continue; + Map map = expectedDimToValueMap.get(dimId); + for (Map.Entry dimValueToDocIdEntry : entry.getValue().entrySet()) { + long dimValue = dimValueToDocIdEntry.getKey(); + int docId = dimValueToDocIdEntry.getValue(); + if (map.get(dimValue) != null) { + assertEquals(map.get(dimValue), resultStarTreeDocuments.get(docId).metrics[0]); + } + } + } + + while (resultStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { + StarTreeDocument resultStarTreeDocument = resultStarTreeDocumentIterator.next(); + StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); + assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); + assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); + assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); + assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + } + } + + private static Map> getExpectedDimToValueMap() { + Map> expectedDimToValueMap = new HashMap<>(); + Map dimValueMap = new HashMap<>(); + dimValueMap.put(1L, 600.0); + dimValueMap.put(2L, 400.0); + dimValueMap.put(3L, 1200.0); + expectedDimToValueMap.put(0, dimValueMap); + + dimValueMap = new HashMap<>(); + dimValueMap.put(11L, 1000.0); + dimValueMap.put(12L, 800.0); + dimValueMap.put(13L, 400.0); + expectedDimToValueMap.put(1, dimValueMap); + + dimValueMap = new HashMap<>(); + dimValueMap.put(21L, 1500.0); + dimValueMap.put(22L, 200.0); + dimValueMap.put(23L, 500.0); + expectedDimToValueMap.put(2, dimValueMap); + return expectedDimToValueMap; + } + + private Iterator expectedStarTreeDocuments() { + List expectedStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { 1L, 
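+            // A null dimension below marks a star (aggregated-away) dimension: [null, 11L, 21L] -> 1000.0
+            // folds the two fieldB=11 rows (400.0 + 600.0), and [null, null, null] -> 2200.0 is the grand
+            // total of all seven documents.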
11L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0 }), + new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0 }), + new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0 }), + new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0 }), + new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0 }), + new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0 }), + new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0 }), + new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0 }), + new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0 }), + new StarTreeDocument(new Long[] { null, 12L, null }, new Object[] { 800.0 }), + new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0 }), + new StarTreeDocument(new Long[] { 2L, 13L, null }, new Object[] { 400.0 }), + new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0 }), + new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0 }), + new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0 }), + new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0 }) + ); + + return expectedStarTreeDocuments.iterator(); + } + + public void testFlushFlow() throws IOException { + List dimList = List.of(0L, 1L, 3L, 4L, 5L); + List docsWithField = List.of(0, 1, 3, 4, 5); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5); + + List metricsList = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0), + getLongFromDouble(50.0) + ); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5); + + StarTreeField sf = getStarTreeFieldWithMultipleMetrics(); + SortedNumericDocValues d1sndv = getSortedNumericMock(dimList, docsWithField); + SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2); + SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList, metricsWithField); + + OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(6), mapperService); + SequentialDocValuesIterator[] dimDvs = { new SequentialDocValuesIterator(d1sndv), new SequentialDocValuesIterator(d2sndv) }; + Iterator starTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimDvs, + List.of(new SequentialDocValuesIterator(m1sndv), new SequentialDocValuesIterator(m2sndv)) + ); + /** + * Asserting following dim / metrics [ dim1, dim2 / Sum [metric], count [metric] ] + [0, 0] | 
[0.0, 1] + [1, 1] | [10.0, 1] + [3, 3] | [30.0, 1] + [4, 4] | [40.0, 1] + [5, 5] | [50.0, 1] + [null, 2] | [20.0, 1] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + assertEquals( + starTreeDocument.dimensions[0] != null ? starTreeDocument.dimensions[0] * 1 * 10.0 : 20.0, + starTreeDocument.metrics[0] + ); + assertEquals(1L, starTreeDocument.metrics[1]); + } + assertEquals(6, count); + } + + public void testFlushFlowBuild() throws IOException { + List dimList = new ArrayList<>(100); + List docsWithField = new ArrayList<>(100); + for (int i = 0; i < 100; i++) { + dimList.add((long) i); + docsWithField.add(i); + } + + List dimList2 = new ArrayList<>(100); + List docsWithField2 = new ArrayList<>(100); + for (int i = 0; i < 100; i++) { + dimList2.add((long) i); + docsWithField2.add(i); + } + + List metricsList = new ArrayList<>(100); + List metricsWithField = new ArrayList<>(100); + for (int i = 0; i < 100; i++) { + metricsList.add(getLongFromDouble(i * 10.0)); + metricsWithField.add(i); + } + + Dimension d1 = new NumericDimension("field1"); + Dimension d2 = new NumericDimension("field3"); + Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + List dims = List.of(d1, d2); + List metrics = List.of(m1); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( + 1, + new HashSet<>(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + StarTreeField sf = new StarTreeField("sf", dims, metrics, c); + SortedNumericDocValues d1sndv = getSortedNumericMock(dimList, docsWithField); + SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2); + SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + + BaseStarTreeBuilder builder = getStarTreeBuilder(sf, getWriteState(100), mapperService); + + DocValuesProducer d1vp = getDocValuesProducer(d1sndv); + DocValuesProducer d2vp = getDocValuesProducer(d2sndv); + DocValuesProducer m1vp = getDocValuesProducer(m1sndv); + Map fieldProducerMap = Map.of("field1", d1vp, "field3", d2vp, "field2", m1vp); + builder.build(fieldProducerMap); + /** + * Asserting following dim / metrics [ dim1, dim2 / Sum [ metric] ] + [0, 0] | [0.0] + [1, 1] | [10.0] + [2, 2] | [20.0] + [3, 3] | [30.0] + [4, 4] | [40.0] + .... + [null, 0] | [0.0] + [null, 1] | [10.0] + ... + [null, null] | [49500.0] + */ + List starTreeDocuments = builder.getStarTreeDocuments(); + for (StarTreeDocument starTreeDocument : starTreeDocuments) { + assertEquals( + starTreeDocument.dimensions[1] != null ? 
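+                // Non-star rows keep dim2 == i with sum i * 10.0; the star document with a null dim2
+                // aggregates all 100 rows: 10.0 * (0 + 1 + ... + 99) = 10.0 * 4950 = 49500.0.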
starTreeDocument.dimensions[1] * 10.0 : 49500.0, + starTreeDocument.metrics[0] + ); + } + builder.close(); + } + + private static DocValuesProducer getDocValuesProducer(SortedNumericDocValues sndv) { + return new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { + return sndv; + } + }; + } + + private static StarTreeField getStarTreeFieldWithMultipleMetrics() { + Dimension d1 = new NumericDimension("field1"); + Dimension d2 = new NumericDimension("field3"); + Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + Metric m2 = new Metric("field2", List.of(MetricStat.COUNT)); + List dims = List.of(d1, d2); + List metrics = List.of(m1, m2); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( + 1000, + new HashSet<>(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + StarTreeField sf = new StarTreeField("sf", dims, metrics, c); + return sf; + } + + public void testMergeFlowWithSum() throws IOException { + List dimList = List.of(0L, 1L, 3L, 4L, 5L, 6L); + List docsWithField = List.of(0, 1, 3, 4, 5, 6); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6); + + List metricsList = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0), + getLongFromDouble(50.0), + getLongFromDouble(60.0) + + ); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + StarTreeField sf = getStarTreeField(MetricStat.SUM); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(6), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Sum [ metric] ] + * [0, 0] | [0.0] + * [1, 1] | [20.0] + * [3, 3] | [60.0] + * [4, 4] | [80.0] + * [5, 5] | [100.0] + * [null, 2] | [40.0] + * ------------------ We only take non star docs + * [6,-1] | [120.0] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + assertEquals( + starTreeDocument.dimensions[0] != null ? 
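+            // Two identical segments are merged, so every sum doubles: a row with dim1 == d carries
+            // d * 10.0 per segment, hence d * 2 * 10.0 here, while the null-dim1 row (dim2 == 2, 20.0 per
+            // segment) aggregates to 40.0.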
starTreeDocument.dimensions[0] * 2 * 10.0 : 40.0, + starTreeDocument.metrics[0] + ); + } + assertEquals(6, count); + } + + public void testMergeFlowWithCount() throws IOException { + List dimList = List.of(0L, 1L, 3L, 4L, 5L, 6L); + List docsWithField = List.of(0, 1, 3, 4, 5, 6); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + StarTreeField sf = getStarTreeField(MetricStat.COUNT); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(6), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] + [0, 0] | [0] + [1, 1] | [2] + [3, 3] | [6] + [4, 4] | [8] + [5, 5] | [10] + [null, 2] | [4] + --------------- + [6,-1] | [12] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + assertEquals(starTreeDocument.dimensions[0] != null ? starTreeDocument.dimensions[0] * 2 : 4, starTreeDocument.metrics[0]); + } + assertEquals(6, count); + } + + private StarTreeValues getStarTreeValues( + SortedNumericDocValues dimList, + SortedNumericDocValues dimList2, + SortedNumericDocValues metricsList, + StarTreeField sf, + String number + ) { + SortedNumericDocValues d1sndv = dimList; + SortedNumericDocValues d2sndv = dimList2; + SortedNumericDocValues m1sndv = metricsList; + Map dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv); + Map metricDocIdSetIterators = Map.of("field2", m1sndv); + StarTreeValues starTreeValues = new StarTreeValues( + sf, + null, + dimDocIdSetIterators, + metricDocIdSetIterators, + Map.of("numSegmentDocs", number) + ); + return starTreeValues; + } + + public void testMergeFlowWithDifferentDocsFromSegments() throws IOException { + List dimList = List.of(0L, 1L, 3L, 4L, 5L, 6L); + List docsWithField = List.of(0, 1, 3, 4, 5, 6); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + List dimList3 = List.of(5L, 6L, 8L, -1L); + List docsWithField3 = List.of(0, 1, 3, 4); + List dimList4 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + + List metricsList2 = List.of(5L, 6L, 7L, 8L, 9L); + List metricsWithField2 = List.of(0, 1, 2, 3, 4); + + StarTreeField sf = getStarTreeField(MetricStat.COUNT); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList3, docsWithField3), + getSortedNumericMock(dimList4, docsWithField4), + getSortedNumericMock(metricsList2, 
metricsWithField2), + sf, + "4" + ); + OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(4), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] + [0, 0] | [0] + [1, 1] | [1] + [3, 3] | [3] + [4, 4] | [4] + [5, 5] | [10] + [6, 6] | [6] + [8, 8] | [8] + [null, 2] | [2] + [null, 7] | [7] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + if (Objects.equals(starTreeDocument.dimensions[0], 5L)) { + assertEquals(starTreeDocument.dimensions[0] * 2, starTreeDocument.metrics[0]); + } else { + assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); + } + } + assertEquals(9, count); + } + + public void testMergeFlowWithMissingDocs() throws IOException { + List dimList = List.of(0L, 1L, 2L, 3L, 4L, 6L); + List docsWithField = List.of(0, 1, 2, 3, 4, 6); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + List dimList3 = List.of(5L, 6L, 8L, -1L); + List docsWithField3 = List.of(0, 1, 3, 4); + List dimList4 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + + List metricsList2 = List.of(5L, 6L, 7L, 8L, 9L); + List metricsWithField2 = List.of(0, 1, 2, 3, 4); + + StarTreeField sf = getStarTreeField(MetricStat.COUNT); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList3, docsWithField3), + getSortedNumericMock(dimList4, docsWithField4), + getSortedNumericMock(metricsList2, metricsWithField2), + sf, + "4" + ); + OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(4), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] + [0, 0] | [0] + [1, 1] | [1] + [2, 2] | [2] + [3, 3] | [3] + [4, 4] | [4] + [5, 5] | [5] + [6, 6] | [6] + [8, 8] | [8] + [null, 5] | [5] + [null, 7] | [7] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + if (starTreeDocument.dimensions[0] == null) { + assertTrue(List.of(5L, 7L).contains(starTreeDocument.dimensions[1])); + } + assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); + } + assertEquals(10, count); + } + + public void testMergeFlowWithMissingDocsInSecondDim() throws IOException { + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 6L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 6); + List dimList = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + List dimList3 = List.of(5L, 6L, 8L, -1L); + List docsWithField3 = List.of(0, 1, 3, 4); + List dimList4 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + + List metricsList2 = List.of(5L, 6L, 7L, 8L, 9L); + List 
+
+    public void testMergeFlowWithMissingDocsInSecondDim() throws IOException {
+        List<Long> dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 6L);
+        List<Integer> docsWithField2 = List.of(0, 1, 2, 3, 4, 6);
+        List<Long> dimList = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L);
+        List<Integer> docsWithField = List.of(0, 1, 2, 3, 4, 5, 6);
+
+        List<Long> metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L);
+        List<Integer> metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6);
+
+        List<Long> dimList3 = List.of(5L, 6L, 8L, -1L);
+        List<Integer> docsWithField3 = List.of(0, 1, 3, 4);
+        List<Long> dimList4 = List.of(5L, 6L, 7L, 8L, -1L);
+        List<Integer> docsWithField4 = List.of(0, 1, 2, 3, 4);
+
+        List<Long> metricsList2 = List.of(5L, 6L, 7L, 8L, 9L);
+        List<Integer> metricsWithField2 = List.of(0, 1, 2, 3, 4);
+
+        StarTreeField sf = getStarTreeField(MetricStat.COUNT);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            getSortedNumericMock(dimList, docsWithField),
+            getSortedNumericMock(dimList2, docsWithField2),
+            getSortedNumericMock(metricsList, metricsWithField),
+            sf,
+            "6"
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            getSortedNumericMock(dimList3, docsWithField3),
+            getSortedNumericMock(dimList4, docsWithField4),
+            getSortedNumericMock(metricsList2, metricsWithField2),
+            sf,
+            "4"
+        );
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(4), mapperService);
+        Iterator<StarTreeDocument> starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2));
+        /**
+         * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ]
+         [0, 0] | [0]
+         [1, 1] | [1]
+         [2, 2] | [2]
+         [3, 3] | [3]
+         [4, 4] | [4]
+         [5, 5] | [5]
+         [5, null] | [5]
+         [6, 6] | [6]
+         [8, 8] | [8]
+         [null, 7] | [7]
+         */
+        int count = 0;
+        while (starTreeDocumentIterator.hasNext()) {
+            count++;
+            StarTreeDocument starTreeDocument = starTreeDocumentIterator.next();
+            if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[0] == 5) {
+                assertEquals(starTreeDocument.dimensions[0], starTreeDocument.metrics[0]);
+            } else {
+                assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]);
+            }
+        }
+        assertEquals(10, count);
+    }
+
+    public void testMergeFlowWithDocsMissingAtTheEnd() throws IOException {
+        List<Long> dimList = List.of(0L, 1L, 2L, 3L, 4L);
+        List<Integer> docsWithField = List.of(0, 1, 2, 3, 4);
+        List<Long> dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L);
+        List<Integer> docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6);
+
+        List<Long> metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L);
+        List<Integer> metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6);
+
+        List<Long> dimList3 = List.of(5L, 6L, 8L, -1L);
+        List<Integer> docsWithField3 = List.of(0, 1, 3, 4);
+        List<Long> dimList4 = List.of(5L, 6L, 7L, 8L, -1L);
+        List<Integer> docsWithField4 = List.of(0, 1, 2, 3, 4);
+
+        List<Long> metricsList2 = List.of(5L, 6L, 7L, 8L, 9L);
+        List<Integer> metricsWithField2 = List.of(0, 1, 2, 3, 4);
+
+        StarTreeField sf = getStarTreeField(MetricStat.COUNT);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            getSortedNumericMock(dimList, docsWithField),
+            getSortedNumericMock(dimList2, docsWithField2),
+            getSortedNumericMock(metricsList, metricsWithField),
+            sf,
+            "6"
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            getSortedNumericMock(dimList3, docsWithField3),
+            getSortedNumericMock(dimList4, docsWithField4),
+            getSortedNumericMock(metricsList2, metricsWithField2),
+            sf,
+            "4"
+        );
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService);
+        Iterator<StarTreeDocument> starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2));
+        /**
+         * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ]
+         [0, 0] | [0]
+         [1, 1] | [1]
+         [2, 2] | [2]
+         [3, 3] | [3]
+         [4, 4] | [4]
+         [5, 5] | [5]
+         [6, 6] | [6]
+         [8, 8] | [8]
+         [null, 5] | [5]
+         [null, 7] | [7]
+         */
+        int count = 0;
+        while (starTreeDocumentIterator.hasNext()) {
+            count++;
+            StarTreeDocument starTreeDocument = starTreeDocumentIterator.next();
+            if (starTreeDocument.dimensions[0] == null) {
+                assertTrue(List.of(5L, 7L).contains(starTreeDocument.dimensions[1]));
+            }
+            assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]);
+        }
+        assertEquals(10, count);
+    }
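+
+    // Merging with a completely empty segment ("0" segment docs, empty doc values) must leave the
+    // non-empty segment's aggregation unchanged, as the expected table below shows.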
+
+    public void testMergeFlowWithEmptyFieldsInOneSegment() throws IOException {
+        List<Long> dimList = List.of(0L, 1L, 2L, 3L, 4L);
+        List<Integer> docsWithField = List.of(0, 1, 2, 3, 4);
+        List<Long> dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L);
+        List<Integer> docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6);
+
+        List<Long> metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L);
+        List<Integer> metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6);
+
+        StarTreeField sf = getStarTreeField(MetricStat.COUNT);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            getSortedNumericMock(dimList, docsWithField),
+            getSortedNumericMock(dimList2, docsWithField2),
+            getSortedNumericMock(metricsList, metricsWithField),
+            sf,
+            "6"
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            DocValues.emptySortedNumeric(),
+            DocValues.emptySortedNumeric(),
+            DocValues.emptySortedNumeric(),
+            sf,
+            "0"
+        );
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(0), mapperService);
+        Iterator<StarTreeDocument> starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2));
+        /**
+         * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ]
+         [0, 0] | [0]
+         [1, 1] | [1]
+         [2, 2] | [2]
+         [3, 3] | [3]
+         [4, 4] | [4]
+         [null, 5] | [5]
+         */
+        int count = 0;
+        while (starTreeDocumentIterator.hasNext()) {
+            count++;
+            StarTreeDocument starTreeDocument = starTreeDocumentIterator.next();
+            if (starTreeDocument.dimensions[0] == null) {
+                assertEquals(5L, (long) starTreeDocument.dimensions[1]);
+            }
+            assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]);
+        }
+        assertEquals(6, count);
+    }
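+
+    // For dimension value i, each segment holds five documents (doc ids i * 5 .. i * 5 + 4) whose
+    // metrics sum to 250 * i + 100; merging two identical segments doubles that to 500 * i + 200,
+    // which is exactly what the assertion on dimensions[3] below checks.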
+
+    public void testMergeFlowWithDuplicateDimensionValues() throws IOException {
+        List<Long> dimList1 = new ArrayList<>(500);
+        List<Integer> docsWithField1 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 5 + j);
+            }
+        }
+
+        List<Long> dimList2 = new ArrayList<>(500);
+        List<Integer> docsWithField2 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList2.add((long) i);
+                docsWithField2.add(i * 5 + j);
+            }
+        }
+
+        List<Long> dimList3 = new ArrayList<>(500);
+        List<Integer> docsWithField3 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList3.add((long) i);
+                docsWithField3.add(i * 5 + j);
+            }
+        }
+
+        List<Long> dimList4 = new ArrayList<>(500);
+        List<Integer> docsWithField4 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList4.add((long) i);
+                docsWithField4.add(i * 5 + j);
+            }
+        }
+
+        List<Long> metricsList = new ArrayList<>(100);
+        List<Integer> metricsWithField = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList.add(getLongFromDouble(i * 10.0));
+            metricsWithField.add(i);
+        }
+
+        StarTreeField sf = getStarTreeField(1);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService);
+        builder.build(List.of(starTreeValues, starTreeValues2));
+        List<StarTreeDocument> starTreeDocuments = builder.getStarTreeDocuments();
+        assertEquals(401, starTreeDocuments.size());
+        int count = 0;
+        double sum = 0;
+        /**
+         401 docs get generated
+         [0, 0, 0, 0] | [200.0]
+         [1, 1, 1, 1] | [700.0]
+         [2, 2, 2, 2] | [1200.0]
+         [3, 3, 3, 3] | [1700.0]
+         [4, 4, 4, 4] | [2200.0]
+         .....
+         [null, null, null, 99] | [49700.0]
+         [null, null, null, null] | [2495000.0]
+         */
+        for (StarTreeDocument starTreeDocument : starTreeDocuments) {
+            if (starTreeDocument.dimensions[3] == null) {
+                assertEquals(sum, starTreeDocument.metrics[0]);
+            } else {
+                if (starTreeDocument.dimensions[0] != null) {
+                    sum += (double) starTreeDocument.metrics[0];
+                }
+                assertEquals(starTreeDocument.dimensions[3] * 500 + 200.0, starTreeDocument.metrics[0]);
+            }
+            count++;
+        }
+        assertEquals(401, count);
+        builder.close();
+    }
+
+    public void testMergeFlowWithMaxLeafDocs() throws IOException {
+        List<Long> dimList1 = new ArrayList<>(500);
+        List<Integer> docsWithField1 = new ArrayList<>(500);
+
+        for (int i = 0; i < 20; i++) {
+            for (int j = 0; j < 20; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 20 + j);
+            }
+        }
+        for (int i = 80; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 5 + j);
+            }
+        }
+        List<Long> dimList3 = new ArrayList<>(500);
+        List<Integer> docsWithField3 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList3.add((long) i);
+                docsWithField3.add(i * 5 + j);
+            }
+        }
+        List<Long> dimList2 = new ArrayList<>(500);
+        List<Integer> docsWithField2 = new ArrayList<>(500);
+        for (int i = 0; i < 10; i++) {
+            for (int j = 0; j < 50; j++) {
+                dimList2.add((long) i);
+                docsWithField2.add(i * 50 + j);
+            }
+        }
+
+        List<Long> dimList4 = new ArrayList<>(500);
+        List<Integer> docsWithField4 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList4.add((long) i);
+                docsWithField4.add(i * 5 + j);
+            }
+        }
+
+        List<Long> metricsList = new ArrayList<>(100);
+        List<Integer> metricsWithField = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList.add(getLongFromDouble(i * 10.0));
+            metricsWithField.add(i);
+        }
+
+        StarTreeField sf = getStarTreeField(3);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService);
+        builder.build(List.of(starTreeValues, starTreeValues2));
+        List<StarTreeDocument> starTreeDocuments = builder.getStarTreeDocuments();
+        /**
+         635 docs get generated
+         [0, 0, 0, 0] | [200.0]
+         [1, 1, 1, 1] | [700.0]
+         [2, 2, 2, 2] | [1200.0]
+         [3, 3, 3, 3] | [1700.0]
+         [4, 4, 4, 4] | [2200.0]
+         .....
+         [null, null, null, 99] | [49700.0]
+         .....
+         [null, null, null, null] | [2495000.0]
+         */
+        assertEquals(635, starTreeDocuments.size());
+        builder.close();
+    }
+
+    private StarTreeValues getStarTreeValues(
+        List<Long> dimList1,
+        List<Integer> docsWithField1,
+        List<Long> dimList2,
+        List<Integer> docsWithField2,
+        List<Long> dimList3,
+        List<Integer> docsWithField3,
+        List<Long> dimList4,
+        List<Integer> docsWithField4,
+        List<Long> metricsList,
+        List<Integer> metricsWithField,
+        StarTreeField sf
+    ) {
+        SortedNumericDocValues d1sndv = getSortedNumericMock(dimList1, docsWithField1);
+        SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2);
+        SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3);
+        SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4);
+        SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
+        Map<String, DocIdSetIterator> dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv);
+        Map<String, DocIdSetIterator> metricDocIdSetIterators = Map.of("field2", m1sndv);
+        StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(500));
+        return starTreeValues;
+    }
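+
+    // With maxLeafDocs = 3, nodes covering more than three documents keep splitting on further
+    // dimensions, so the previous test produces 635 star-tree documents instead of 401. The next
+    // test keeps the same threshold but makes dim2 constant (always 1), which still aggregates
+    // back down to 401 documents.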
+
+    public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOException {
+        List<Long> dimList1 = new ArrayList<>(500);
+        List<Integer> docsWithField1 = new ArrayList<>(500);
+
+        for (int i = 0; i < 20; i++) {
+            for (int j = 0; j < 20; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 20 + j);
+            }
+        }
+        for (int i = 80; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 5 + j);
+            }
+        }
+        List<Long> dimList3 = new ArrayList<>(500);
+        List<Integer> docsWithField3 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList3.add((long) i);
+                docsWithField3.add(i * 5 + j);
+            }
+        }
+        List<Long> dimList2 = new ArrayList<>(500);
+        List<Integer> docsWithField2 = new ArrayList<>(500);
+        for (int i = 0; i < 500; i++) {
+            dimList2.add((long) 1);
+            docsWithField2.add(i);
+        }
+
+        List<Long> dimList4 = new ArrayList<>(500);
+        List<Integer> docsWithField4 = new ArrayList<>(500);
+        for (int i = 0; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList4.add((long) i);
+                docsWithField4.add(i * 5 + j);
+            }
+        }
+
+        List<Long> metricsList = new ArrayList<>(100);
+        List<Integer> metricsWithField = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList.add(getLongFromDouble(i * 10.0));
+            metricsWithField.add(i);
+        }
+
+        StarTreeField sf = getStarTreeField(3);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService);
+        builder.build(List.of(starTreeValues, starTreeValues2));
+        List<StarTreeDocument> starTreeDocuments = builder.getStarTreeDocuments();
+        assertEquals(401, starTreeDocuments.size());
+        builder.close();
+    }
+
+    public static long getLongFromDouble(double value) {
+        return Double.doubleToLongBits(value);
+    }
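+
+    // expectedDimToValueMap in the next test maps dimension ordinal -> (dimension value ->
+    // expected aggregated metric); Long.MAX_VALUE is used as the key for star (aggregated) nodes,
+    // mirroring how traverseStarTree() records them further below.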
+
+    public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOException {
+        List<Long> dimList1 = new ArrayList<>(500);
+        List<Integer> docsWithField1 = new ArrayList<>(500);
+        Map<Integer, Map<Long, Double>> expectedDimToValueMap = new HashMap<>();
+        Map<Long, Double> dimValueMap = new HashMap<>();
+        for (int i = 0; i < 20; i++) {
+            for (int j = 0; j < 20; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 20 + j);
+            }
+            // metric = no of docs * 10.0
+            dimValueMap.put((long) i, 200.0);
+        }
+        for (int i = 80; i < 100; i++) {
+            for (int j = 0; j < 5; j++) {
+                dimList1.add((long) i);
+                docsWithField1.add(i * 5 + j);
+            }
+            // metric = no of docs * 10.0
+            dimValueMap.put((long) i, 50.0);
+        }
+        dimValueMap.put(Long.MAX_VALUE, 5000.0);
+        expectedDimToValueMap.put(0, dimValueMap);
+        dimValueMap = new HashMap<>();
+        List<Long> dimList3 = new ArrayList<>(500);
+        List<Integer> docsWithField3 = new ArrayList<>(500);
+        for (int i = 0; i < 500; i++) {
+            dimList3.add((long) 1);
+            docsWithField3.add(i);
+            dimValueMap.put((long) i, 10.0);
+        }
+        dimValueMap.put(Long.MAX_VALUE, 5000.0);
+        expectedDimToValueMap.put(2, dimValueMap);
+        dimValueMap = new HashMap<>();
+        List<Long> dimList2 = new ArrayList<>(500);
+        List<Integer> docsWithField2 = new ArrayList<>(500);
+        for (int i = 0; i < 500; i++) {
+            dimList2.add((long) i);
+            docsWithField2.add(i);
+            dimValueMap.put((long) i, 10.0);
+        }
+        dimValueMap.put(Long.MAX_VALUE, 200.0);
+        expectedDimToValueMap.put(1, dimValueMap);
+        dimValueMap = new HashMap<>();
+        List<Long> dimList4 = new ArrayList<>(500);
+        List<Integer> docsWithField4 = new ArrayList<>(500);
+        for (int i = 0; i < 500; i++) {
+            dimList4.add((long) 1);
+            docsWithField4.add(i);
+            dimValueMap.put((long) i, 10.0);
+        }
+        dimValueMap.put(Long.MAX_VALUE, 5000.0);
+        expectedDimToValueMap.put(3, dimValueMap);
+        List<Long> metricsList = new ArrayList<>(100);
+        List<Integer> metricsWithField = new ArrayList<>(100);
+        for (int i = 0; i < 500; i++) {
+            metricsList.add(getLongFromDouble(10.0));
+            metricsWithField.add(i);
+        }
+
+        StarTreeField sf = getStarTreeField(10);
+        StarTreeValues starTreeValues = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+
+        StarTreeValues starTreeValues2 = getStarTreeValues(
+            dimList1,
+            docsWithField1,
+            dimList2,
+            docsWithField2,
+            dimList3,
+            docsWithField3,
+            dimList4,
+            docsWithField4,
+            metricsList,
+            metricsWithField,
+            sf
+        );
+        OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService);
+        builder.build(List.of(starTreeValues, starTreeValues2));
+        List<StarTreeDocument> starTreeDocuments = builder.getStarTreeDocuments();
+        Map<Integer, Map<Long, Integer>> dimValueToDocIdMap = new HashMap<>();
+        traverseStarTree(builder.rootNode, dimValueToDocIdMap, true);
+        for (Map.Entry<Integer, Map<Long, Integer>> entry : dimValueToDocIdMap.entrySet()) {
+            int dimId = entry.getKey();
+            if (dimId == -1) continue;
+            Map<Long, Double> map = expectedDimToValueMap.get(dimId);
+            for (Map.Entry<Long, Integer> dimValueToDocIdEntry : entry.getValue().entrySet()) {
+                long dimValue = dimValueToDocIdEntry.getKey();
+                int docId = dimValueToDocIdEntry.getValue();
+                assertEquals(map.get(dimValue) * 2, starTreeDocuments.get(docId).metrics[0]);
+            }
+        }
+        assertEquals(1041, starTreeDocuments.size());
+        builder.close();
+    }
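+
+    // Helper for the maxLeafDocs tests: four numeric dimensions and a SUM metric. Note that the
+    // configuration requests the OFF_HEAP build mode, but these tests construct
+    // OnHeapStarTreeBuilder directly, so that flag is not exercised here.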
StarTreeField("sf", dims, metrics, c); + return sf; + } + + private void traverseStarTree(TreeNode root, Map> dimValueToDocIdMap, boolean traverStarNodes) { + TreeNode starTree = root; + // Use BFS to traverse the star tree + Queue queue = new ArrayDeque<>(); + queue.add(starTree); + int currentDimensionId = -1; + TreeNode starTreeNode; + List docIds = new ArrayList<>(); + while ((starTreeNode = queue.poll()) != null) { + int dimensionId = starTreeNode.dimensionId; + if (dimensionId > currentDimensionId) { + currentDimensionId = dimensionId; + } + + // store aggregated document of the node + int docId = starTreeNode.aggregatedDocId; + Map map = dimValueToDocIdMap.getOrDefault(dimensionId, new HashMap<>()); + if (starTreeNode.isStarNode) { + map.put(Long.MAX_VALUE, docId); + } else { + map.put(starTreeNode.dimensionValue, docId); + } + dimValueToDocIdMap.put(dimensionId, map); + + if (starTreeNode.children != null && (!traverStarNodes || starTreeNode.isStarNode)) { + Iterator childrenIterator = starTreeNode.children.values().iterator(); + while (childrenIterator.hasNext()) { + TreeNode childNode = childrenIterator.next(); + queue.add(childNode); + } + } + } + } + + public void testMergeFlow() throws IOException { + List dimList1 = new ArrayList<>(1000); + List docsWithField1 = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + dimList1.add((long) i); + docsWithField1.add(i); + } + + List dimList2 = new ArrayList<>(1000); + List docsWithField2 = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + dimList2.add((long) i); + docsWithField2.add(i); + } + + List dimList3 = new ArrayList<>(1000); + List docsWithField3 = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + dimList3.add((long) i); + docsWithField3.add(i); + } + + List dimList4 = new ArrayList<>(1000); + List docsWithField4 = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + dimList4.add((long) i); + docsWithField4.add(i); + } + + List dimList5 = new ArrayList<>(1000); + List docsWithField5 = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + dimList5.add((long) i); + docsWithField5.add(i); + } + + List metricsList = new ArrayList<>(1000); + List metricsWithField = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + metricsList.add(getLongFromDouble(i * 10.0)); + metricsWithField.add(i); + } + + Dimension d1 = new NumericDimension("field1"); + Dimension d2 = new NumericDimension("field3"); + Dimension d3 = new NumericDimension("field5"); + Dimension d4 = new NumericDimension("field8"); + // Dimension d5 = new NumericDimension("field5"); + Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + List dims = List.of(d1, d2, d3, d4); + List metrics = List.of(m1); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( + 1, + new HashSet<>(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + StarTreeField sf = new StarTreeField("sf", dims, metrics, c); + SortedNumericDocValues d1sndv = getSortedNumericMock(dimList1, docsWithField1); + SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2); + SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3); + SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4); + SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + Map dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv); + Map metricDocIdSetIterators = Map.of("field2", m1sndv); + StarTreeValues starTreeValues = new 
+        StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(1000));
+
+        SortedNumericDocValues f2d1sndv = getSortedNumericMock(dimList1, docsWithField1);
+        SortedNumericDocValues f2d2sndv = getSortedNumericMock(dimList2, docsWithField2);
+        SortedNumericDocValues f2d3sndv = getSortedNumericMock(dimList3, docsWithField3);
+        SortedNumericDocValues f2d4sndv = getSortedNumericMock(dimList4, docsWithField4);
+        SortedNumericDocValues f2m1sndv = getSortedNumericMock(metricsList, metricsWithField);
+        Map<String, DocIdSetIterator> f2dimDocIdSetIterators = Map.of(
+            "field1",
+            f2d1sndv,
+            "field3",
+            f2d2sndv,
+            "field5",
+            f2d3sndv,
+            "field8",
+            f2d4sndv
+        );
+        Map<String, DocIdSetIterator> f2metricDocIdSetIterators = Map.of("field2", f2m1sndv);
+        StarTreeValues starTreeValues2 = new StarTreeValues(
+            sf,
+            null,
+            f2dimDocIdSetIterators,
+            f2metricDocIdSetIterators,
+            getAttributes(1000)
+        );
+
+        BaseStarTreeBuilder builder = getStarTreeBuilder(sf, writeState, mapperService);
+        Iterator<StarTreeDocument> starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2));
+        /**
+         [0, 0, 0, 0] | [0.0]
+         [1, 1, 1, 1] | [20.0]
+         [2, 2, 2, 2] | [40.0]
+         [3, 3, 3, 3] | [60.0]
+         [4, 4, 4, 4] | [80.0]
+         [5, 5, 5, 5] | [100.0]
+         ...
+         [999, 999, 999, 999] | [19980.0]
+         */
+        while (starTreeDocumentIterator.hasNext()) {
+            StarTreeDocument starTreeDocument = starTreeDocumentIterator.next();
+            assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]);
+        }
+        builder.close();
+    }
+
+    Map<String, String> getAttributes(int numSegmentDocs) {
+        return Map.of(String.valueOf(NUM_SEGMENT_DOCS), String.valueOf(numSegmentDocs));
+    }
+
+    private static StarTreeField getStarTreeField(MetricStat count) {
+        Dimension d1 = new NumericDimension("field1");
+        Dimension d2 = new NumericDimension("field3");
+        Metric m1 = new Metric("field2", List.of(count));
+        List<Dimension> dims = List.of(d1, d2);
+        List<Metric> metrics = List.of(m1);
+        StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(
+            1000,
+            new HashSet<>(),
+            StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP
+        );
+        return new StarTreeField("sf", dims, metrics, c);
+    }
+
+    private Long getLongFromDouble(Double num) {
+        if (num == null) {
+            return null;
+        }
+        return NumericUtils.doubleToSortableLong(num);
+    }
+
+    SortedNumericDocValues getSortedNumericMock(List<Long> dimList, List<Integer> docsWithField) {
+        return new SortedNumericDocValues() {
+            int index = -1;
+
+            @Override
+            public long nextValue() {
+                return dimList.get(index);
+            }
+
+            @Override
+            public int docValueCount() {
+                return 0;
+            }
+
+            @Override
+            public boolean advanceExact(int target) {
+                return false;
+            }
+
+            @Override
+            public int docID() {
+                return index;
+            }
+
+            @Override
+            public int nextDoc() {
+                if (index == docsWithField.size() - 1) {
+                    return NO_MORE_DOCS;
+                }
+                index++;
+                return docsWithField.get(index);
+            }
+
+            @Override
+            public int advance(int target) {
+                return 0;
+            }
+
+            @Override
+            public long cost() {
+                return 0;
+            }
+        };
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        if (builder != null) {
+            builder.close();
+        }
+        directory.close();
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java
index b78130e72aba1..51ebc02ea8243 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilderTests.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.Version;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues;
 import org.opensearch.index.compositeindex.datacube.Dimension;
 import org.opensearch.index.compositeindex.datacube.Metric;
 import org.opensearch.index.compositeindex.datacube.MetricStat;
@@ -30,6 +31,7 @@
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeField;
 import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration;
 import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo;
+import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator;
 import org.opensearch.index.fielddata.IndexNumericFieldData;
 import org.opensearch.index.mapper.ContentPath;
 import org.opensearch.index.mapper.DocumentMapper;
@@ -155,7 +157,10 @@ public static void setup() throws IOException {
         );
         when(documentMapper.mappers()).thenReturn(fieldMappers);
-        builder = new BaseStarTreeBuilder(starTreeField, fieldProducerMap, state, mapperService) {
+        builder = new BaseStarTreeBuilder(starTreeField, state, mapperService) {
+            @Override
+            public void build(List<StarTreeValues> starTreeValuesSubs) throws IOException {}
+
             @Override
             public void appendStarTreeDocument(StarTreeDocument starTreeDocument) throws IOException {}
@@ -171,11 +176,14 @@ public List<StarTreeDocument> getStarTreeDocuments() {
             @Override
             public Long getDimensionValue(int docId, int dimensionId) throws IOException {
-                return 0L;
+                return 0L;
             }
 
             @Override
-            public Iterator<StarTreeDocument> sortAndAggregateStarTreeDocuments() throws IOException {
+            public Iterator<StarTreeDocument> sortAndAggregateSegmentDocuments(
+                SequentialDocValuesIterator[] dimensionReaders,
+                List<SequentialDocValuesIterator> metricReaders
+            ) throws IOException {
                 return null;
             }
@@ -184,14 +192,19 @@ public Iterator<StarTreeDocument> generateStarTreeDocumentsForStarNode(int start
                 throws IOException {
                 return null;
             }
+
+            @Override
+            Iterator<StarTreeDocument> mergeStarTrees(List<StarTreeValues> starTreeValues) throws IOException {
+                return null;
+            }
         };
     }
 
     public void test_generateMetricAggregatorInfos() throws IOException {
-        List<MetricAggregatorInfo> metricAggregatorInfos = builder.generateMetricAggregatorInfos(mapperService, state);
+        List<MetricAggregatorInfo> metricAggregatorInfos = builder.generateMetricAggregatorInfos(mapperService);
         List<MetricAggregatorInfo> expectedMetricAggregatorInfos = List.of(
-            new MetricAggregatorInfo(MetricStat.SUM, "field2", starTreeField.getName(), IndexNumericFieldData.NumericType.DOUBLE, null),
-            new MetricAggregatorInfo(MetricStat.SUM, "field4", starTreeField.getName(), IndexNumericFieldData.NumericType.DOUBLE, null)
+            new MetricAggregatorInfo(MetricStat.SUM, "field2", starTreeField.getName(), IndexNumericFieldData.NumericType.DOUBLE),
+            new MetricAggregatorInfo(MetricStat.SUM, "field4", starTreeField.getName(), IndexNumericFieldData.NumericType.DOUBLE)
        );
         assertEquals(metricAggregatorInfos, expectedMetricAggregatorInfos);
     }
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java
index 4e107e78d27be..aed08b7727be7 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java
+++ 
b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilderTests.java @@ -8,699 +8,17 @@ package org.opensearch.index.compositeindex.datacube.startree.builder; -import org.apache.lucene.codecs.DocValuesProducer; -import org.apache.lucene.codecs.lucene99.Lucene99Codec; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentWriteState; -import org.apache.lucene.index.VectorEncoding; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.sandbox.document.HalfFloatPoint; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.InfoStream; -import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util.Version; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.Metric; -import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.NumericDimension; -import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; -import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; -import org.opensearch.index.mapper.ContentPath; -import org.opensearch.index.mapper.DocumentMapper; -import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.MappingLookup; -import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class OnHeapStarTreeBuilderTests extends OpenSearchTestCase { - - private OnHeapStarTreeBuilder builder; - private MapperService mapperService; - private List dimensionsOrder; - private List fields = List.of(); - private List metrics; - private Directory directory; - private FieldInfo[] fieldsInfo; - private StarTreeField compositeField; - private Map fieldProducerMap; - private SegmentWriteState writeState; - - @Before - public void setup() throws IOException { - fields = List.of("field1", "field2", "field3", "field4", "field5", "field6", "field7", "field8", "field9", "field10"); - - dimensionsOrder = List.of( - new NumericDimension("field1"), - new NumericDimension("field3"), - new NumericDimension("field5"), - new NumericDimension("field8") - ); - metrics = List.of( - new Metric("field2", List.of(MetricStat.SUM)), - new Metric("field4", List.of(MetricStat.SUM)), - new Metric("field6", List.of(MetricStat.COUNT)) - ); - - DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); - - compositeField = new StarTreeField( - "test", - dimensionsOrder, - metrics, - new StarTreeFieldConfiguration(1, Set.of("field8"), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) - ); - directory = newFSDirectory(createTempDir()); - SegmentInfo segmentInfo = new SegmentInfo( - directory, - Version.LATEST, - Version.LUCENE_9_11_0, - 
"test_segment", - 5, - false, - false, - new Lucene99Codec(), - new HashMap<>(), - UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), - new HashMap<>(), - null - ); - - fieldsInfo = new FieldInfo[fields.size()]; - fieldProducerMap = new HashMap<>(); - for (int i = 0; i < fieldsInfo.length; i++) { - fieldsInfo[i] = new FieldInfo( - fields.get(i), - i, - false, - false, - true, - IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, - DocValuesType.SORTED_NUMERIC, - -1, - Collections.emptyMap(), - 0, - 0, - 0, - 0, - VectorEncoding.FLOAT32, - VectorSimilarityFunction.EUCLIDEAN, - false, - false - ); - fieldProducerMap.put(fields.get(i), docValuesProducer); - } - FieldInfos fieldInfos = new FieldInfos(fieldsInfo); - writeState = new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); - - mapperService = mock(MapperService.class); - DocumentMapper documentMapper = mock(DocumentMapper.class); - when(mapperService.documentMapper()).thenReturn(documentMapper); - Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); - NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.DOUBLE, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.DOUBLE, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.DOUBLE, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - MappingLookup fieldMappers = new MappingLookup( - Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), - Collections.emptyList(), - Collections.emptyList(), - 0, - null - ); - when(documentMapper.mappers()).thenReturn(fieldMappers); - builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); - } - - public void test_sortAndAggregateStarTreeDocuments() throws IOException { - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); - - List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }) - ); - Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - long metric2 = NumericUtils.doubleToSortableLong((Double) 
starTreeDocuments[i].metrics[1]); - long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - int numOfAggregatedDocuments = 0; - while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { - StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); - StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); - - assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); - assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); - assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); - assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); - assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); - assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); - assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); - - numOfAggregatedDocuments++; - } - - assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); - - } - - public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOException { - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); - StarTreeDocument expectedStarTreeDocument = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 21.0, 14.0, 2.0 }); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - Long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - Long metric2 = starTreeDocuments[i].metrics[1] != null - ? 
NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]) - : null; - Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Object[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - - StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); - assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); - assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); - assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); - assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); - assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); - assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); - - assertThrows( - "Null metric should have resulted in IllegalStateException", - IllegalStateException.class, - segmentStarTreeDocumentIterator::next - ); - - } - - public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() throws IOException { - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 10.0, 6.0, randomDouble() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 14.0, 12.0, randomDouble() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Double[] { 11.0, 16.0, randomDouble() }); - - List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L }) - ); - Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); - long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - int numOfAggregatedDocuments = 0; - while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { - StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); - StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); - - assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); - 
assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); - assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); - assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); - assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); - assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); - assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); - - numOfAggregatedDocuments++; - } - - assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); - - } - - public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() throws IOException { - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { Double.MAX_VALUE, 10.0, randomDouble() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, Double.MIN_VALUE, randomDouble() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); - - List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L }) - ); - Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); - long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - int numOfAggregatedDocuments = 0; - while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { - StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); - StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); - - assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); - assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); - assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); - assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); - assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); - assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); - assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); - - numOfAggregatedDocuments++; - } - - 
assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); - - } - - public void test_build_halfFloatMetrics() throws IOException { - - mapperService = mock(MapperService.class); - DocumentMapper documentMapper = mock(DocumentMapper.class); - when(mapperService.documentMapper()).thenReturn(documentMapper); - Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); - NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.HALF_FLOAT, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - MappingLookup fieldMappers = new MappingLookup( - Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), - Collections.emptyList(), - Collections.emptyList(), - 0, - null - ); - when(documentMapper.mappers()).thenReturn(fieldMappers); - builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument( - new Long[] { 2L, 4L, 3L, 4L }, - new HalfFloatPoint[] { new HalfFloatPoint("hf1", 12), new HalfFloatPoint("hf6", 10), new HalfFloatPoint("field6", 10) } - ); - starTreeDocuments[1] = new StarTreeDocument( - new Long[] { 3L, 4L, 2L, 1L }, - new HalfFloatPoint[] { new HalfFloatPoint("hf2", 10), new HalfFloatPoint("hf7", 6), new HalfFloatPoint("field6", 10) } - ); - starTreeDocuments[2] = new StarTreeDocument( - new Long[] { 3L, 4L, 2L, 1L }, - new HalfFloatPoint[] { new HalfFloatPoint("hf3", 14), new HalfFloatPoint("hf8", 12), new HalfFloatPoint("field6", 10) } - ); - starTreeDocuments[3] = new StarTreeDocument( - new Long[] { 2L, 4L, 3L, 4L }, - new HalfFloatPoint[] { new HalfFloatPoint("hf4", 9), new HalfFloatPoint("hf9", 4), new HalfFloatPoint("field6", 10) } - ); - starTreeDocuments[4] = new StarTreeDocument( - new Long[] { 3L, 4L, 2L, 1L }, - new HalfFloatPoint[] { new HalfFloatPoint("hf5", 11), new HalfFloatPoint("hf10", 16), new HalfFloatPoint("field6", 10) } - ); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = HalfFloatPoint.halfFloatToSortableShort( - ((HalfFloatPoint) starTreeDocuments[i].metrics[0]).numericValue().floatValue() - ); - long metric2 = HalfFloatPoint.halfFloatToSortableShort( - ((HalfFloatPoint) starTreeDocuments[i].metrics[1]).numericValue().floatValue() - ); - long metric3 = HalfFloatPoint.halfFloatToSortableShort( - ((HalfFloatPoint) starTreeDocuments[i].metrics[2]).numericValue().floatValue() - ); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - builder.build(segmentStarTreeDocumentIterator); - - List resultStarTreeDocuments = builder.getStarTreeDocuments(); - assertEquals(7, resultStarTreeDocuments.size()); - - Iterator 
expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); - assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); - } - - public void test_build_floatMetrics() throws IOException { - - mapperService = mock(MapperService.class); - DocumentMapper documentMapper = mock(DocumentMapper.class); - when(mapperService.documentMapper()).thenReturn(documentMapper); - Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); - NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.FLOAT, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.FLOAT, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.FLOAT, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - MappingLookup fieldMappers = new MappingLookup( - Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), - Collections.emptyList(), - Collections.emptyList(), - 0, - null - ); - when(documentMapper.mappers()).thenReturn(fieldMappers); - builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 12.0F, 10.0F, randomFloat() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 10.0F, 6.0F, randomFloat() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 14.0F, 12.0F, randomFloat() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 9.0F, 4.0F, randomFloat() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Float[] { 11.0F, 16.0F, randomFloat() }); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[0]); - long metric2 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[1]); - long metric3 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[2]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - builder.build(segmentStarTreeDocumentIterator); - - List resultStarTreeDocuments = builder.getStarTreeDocuments(); - assertEquals(7, resultStarTreeDocuments.size()); - - Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); - assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); - } - - public void test_build_longMetrics() throws IOException { - - mapperService = mock(MapperService.class); - DocumentMapper documentMapper = mock(DocumentMapper.class); - when(mapperService.documentMapper()).thenReturn(documentMapper); - Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); - 
NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("field2", NumberFieldMapper.NumberType.LONG, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder("field4", NumberFieldMapper.NumberType.LONG, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder("field6", NumberFieldMapper.NumberType.LONG, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - MappingLookup fieldMappers = new MappingLookup( - Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3), - Collections.emptyList(), - Collections.emptyList(), - 0, - null - ); - when(documentMapper.mappers()).thenReturn(fieldMappers); - builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 12L, 10L, randomLong() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 10L, 6L, randomLong() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 14L, 12L, randomLong() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 9L, 4L, randomLong() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 11L, 16L, randomLong() }); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = (Long) starTreeDocuments[i].metrics[0]; - long metric2 = (Long) starTreeDocuments[i].metrics[1]; - long metric3 = (Long) starTreeDocuments[i].metrics[2]; - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - builder.build(segmentStarTreeDocumentIterator); - - List resultStarTreeDocuments = builder.getStarTreeDocuments(); - assertEquals(7, resultStarTreeDocuments.size()); - - Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); - assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); - } - - private static Iterator getExpectedStarTreeDocumentIterator() { - List expectedStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }), - new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L }), - new StarTreeDocument(new Long[] { -1L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L }), - new StarTreeDocument(new Long[] { -1L, 4L, -1L, 1L }, new Object[] { 35.0, 34.0, 3L }), - new StarTreeDocument(new Long[] { -1L, 4L, -1L, 4L }, new Object[] { 21.0, 14.0, 2L }), - new StarTreeDocument(new Long[] { -1L, 4L, -1L, -1L }, new Object[] { 56.0, 48.0, 5L }) - ); - return expectedStarTreeDocuments.iterator(); - } - - public void test_build() throws IOException { - - int noOfStarTreeDocuments = 5; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - - starTreeDocuments[0] = new 
StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble() }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble() }); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - long metric2 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[1]); - long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, metric2, metric3 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - builder.build(segmentStarTreeDocumentIterator); - - List resultStarTreeDocuments = builder.getStarTreeDocuments(); - assertEquals(7, resultStarTreeDocuments.size()); - - Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); - assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); - } - - private void assertStarTreeDocuments( - List resultStarTreeDocuments, - Iterator expectedStarTreeDocumentIterator - ) { - Iterator resultStarTreeDocumentIterator = resultStarTreeDocuments.iterator(); - while (resultStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { - StarTreeDocument resultStarTreeDocument = resultStarTreeDocumentIterator.next(); - StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); - - assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); - assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); - assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); - assertEquals(expectedStarTreeDocument.dimensions[3], resultStarTreeDocument.dimensions[3]); - assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); - assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); - assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); - } - } - - public void test_build_starTreeDataset() throws IOException { - - fields = List.of("fieldC", "fieldB", "fieldL", "fieldI"); - - dimensionsOrder = List.of(new NumericDimension("fieldC"), new NumericDimension("fieldB"), new NumericDimension("fieldL")); - metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM))); - - DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); - - compositeField = new StarTreeField( - "test", - dimensionsOrder, - metrics, - new StarTreeFieldConfiguration(1, Set.of(), StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP) - ); - SegmentInfo segmentInfo = new SegmentInfo( - directory, - Version.LATEST, - Version.LUCENE_9_11_0, - "test_segment", - 7, - false, - false, - new Lucene99Codec(), - new HashMap<>(), - UUID.randomUUID().toString().substring(0, 
16).getBytes(StandardCharsets.UTF_8), - new HashMap<>(), - null - ); - - fieldsInfo = new FieldInfo[fields.size()]; - fieldProducerMap = new HashMap<>(); - for (int i = 0; i < fieldsInfo.length; i++) { - fieldsInfo[i] = new FieldInfo( - fields.get(i), - i, - false, - false, - true, - IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, - DocValuesType.SORTED_NUMERIC, - -1, - Collections.emptyMap(), - 0, - 0, - 0, - 0, - VectorEncoding.FLOAT32, - VectorSimilarityFunction.EUCLIDEAN, - false, - false - ); - fieldProducerMap.put(fields.get(i), docValuesProducer); - } - FieldInfos fieldInfos = new FieldInfos(fieldsInfo); - writeState = new SegmentWriteState(InfoStream.getDefault(), segmentInfo.dir, segmentInfo, fieldInfos, null, newIOContext(random())); - - mapperService = mock(MapperService.class); - DocumentMapper documentMapper = mock(DocumentMapper.class); - when(mapperService.documentMapper()).thenReturn(documentMapper); - Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); - NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder("fieldI", NumberFieldMapper.NumberType.DOUBLE, false, true) - .build(new Mapper.BuilderContext(settings, new ContentPath())); - MappingLookup fieldMappers = new MappingLookup( - Set.of(numberFieldMapper1), - Collections.emptyList(), - Collections.emptyList(), - 0, - null - ); - when(documentMapper.mappers()).thenReturn(fieldMappers); - builder = new OnHeapStarTreeBuilder(compositeField, fieldProducerMap, writeState, mapperService); - - int noOfStarTreeDocuments = 7; - StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Double[] { 400.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Double[] { 200.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Double[] { 300.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Double[] { 100.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Double[] { 600.0 }); - starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Double[] { 200.0 }); - starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Double[] { 400.0 }); - - StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - for (int i = 0; i < noOfStarTreeDocuments; i++) { - long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1 }); - } - - Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateStarTreeDocuments(segmentStarTreeDocuments); - builder.build(segmentStarTreeDocumentIterator); - - List resultStarTreeDocuments = builder.getStarTreeDocuments(); - List expectedStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0 }), - new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0 }), - new 
StarTreeDocument(new Long[] { -1L, 11L, 21L }, new Object[] { 1000.0 }), - new StarTreeDocument(new Long[] { -1L, 12L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { -1L, 12L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { -1L, 12L, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { -1L, 13L, 21L }, new Object[] { 100.0 }), - new StarTreeDocument(new Long[] { -1L, 13L, 23L }, new Object[] { 300.0 }), - new StarTreeDocument(new Long[] { -1L, -1L, 21L }, new Object[] { 1500.0 }), - new StarTreeDocument(new Long[] { -1L, -1L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { -1L, -1L, 23L }, new Object[] { 500.0 }), - new StarTreeDocument(new Long[] { -1L, -1L, -1L }, new Object[] { 2200.0 }), - new StarTreeDocument(new Long[] { -1L, 12L, -1L }, new Object[] { 800.0 }), - new StarTreeDocument(new Long[] { -1L, 13L, -1L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, -1L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, -1L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 1L, -1L, -1L }, new Object[] { 600.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, -1L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 3L, -1L, 21L }, new Object[] { 1000.0 }), - new StarTreeDocument(new Long[] { 3L, -1L, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 3L, -1L, -1L }, new Object[] { 1200.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, -1L }, new Object[] { 600.0 }) - ); - - Iterator expectedStarTreeDocumentIterator = expectedStarTreeDocuments.iterator(); - Iterator resultStarTreeDocumentIterator = resultStarTreeDocuments.iterator(); - while (resultStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { - StarTreeDocument resultStarTreeDocument = resultStarTreeDocumentIterator.next(); - StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); - - assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); - assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); - assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); - assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); - } - - } +public class OnHeapStarTreeBuilderTests extends AbstractStarTreeBuilderTests { @Override - public void tearDown() throws Exception { - super.tearDown(); - directory.close(); + public BaseStarTreeBuilder getStarTreeBuilder( + StarTreeField starTreeField, + SegmentWriteState segmentWriteState, + MapperService mapperService + ) { + return new OnHeapStarTreeBuilder(starTreeField, segmentWriteState, mapperService); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java deleted file mode 100644 index 9c2621401faa4..0000000000000 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocValuesIteratorAdapterTests.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
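The expected star-tree documents above encode the rollup convention used throughout these tests: a dimension printed as -1 is the "star" value, meaning that dimension was aggregated away and the metric was summed across all of its values. A minimal, self-contained sketch of that rollup idea follows; it is illustrative Java only, not the OpenSearch builder, and every name in it (StarRollupSketch, STAR, docs) is invented for the example.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class StarRollupSketch {
        public static void main(String[] args) {
            final long STAR = -1L; // sentinel for an aggregated-away dimension
            // input docs: { dim0, dim1, sumMetric }
            long[][] docs = { { 1, 11, 400 }, { 1, 12, 200 }, { 2, 13, 300 } };
            Map<List<Long>, Long> rollup = new LinkedHashMap<>();
            for (long[] d : docs) {
                // roll up on dim0: replace it with the star value, then sum the metric
                List<Long> key = List.of(STAR, d[1]);
                rollup.merge(key, d[2], Long::sum);
            }
            // prints [-1, 11] -> 400, [-1, 12] -> 200, [-1, 13] -> 300
            rollup.forEach((dims, sum) -> System.out.println(dims + " -> " + sum));
        }
    }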
- */ - -package org.opensearch.index.compositeindex.datacube.startree.builder; - -import org.apache.lucene.codecs.DocValuesProducer; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.VectorEncoding; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.DocIdSetIterator; -import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.util.Collections; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class StarTreeDocValuesIteratorAdapterTests extends OpenSearchTestCase { - - private StarTreeDocValuesIteratorAdapter adapter; - - @Override - public void setUp() throws Exception { - super.setUp(); - adapter = new StarTreeDocValuesIteratorAdapter(); - } - - public void testGetDocValuesIterator() throws IOException { - DocValuesProducer mockProducer = mock(DocValuesProducer.class); - SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); - - when(mockProducer.getSortedNumeric(any())).thenReturn(mockSortedNumericDocValues); - - SequentialDocValuesIterator iterator = adapter.getDocValuesIterator(DocValuesType.SORTED_NUMERIC, any(), mockProducer); - - assertNotNull(iterator); - assertEquals(mockSortedNumericDocValues, iterator.getDocIdSetIterator()); - } - - public void testGetDocValuesIteratorWithUnsupportedType() { - DocValuesProducer mockProducer = mock(DocValuesProducer.class); - FieldInfo fieldInfo = new FieldInfo( - "random_field", - 0, - false, - false, - true, - IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, - DocValuesType.SORTED_NUMERIC, - -1, - Collections.emptyMap(), - 0, - 0, - 0, - 0, - VectorEncoding.FLOAT32, - VectorSimilarityFunction.EUCLIDEAN, - false, - false - ); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { - adapter.getDocValuesIterator(DocValuesType.BINARY, fieldInfo, mockProducer); - }); - - assertEquals("Unsupported DocValuesType: BINARY", exception.getMessage()); - } - - public void testGetNextValue() throws IOException { - SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); - SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); - iterator.setDocId(1); - when(mockSortedNumericDocValues.nextValue()).thenReturn(42L); - - Long nextValue = adapter.getNextValue(iterator, 1); - - assertEquals(Long.valueOf(42L), nextValue); - assertEquals(Long.valueOf(42L), iterator.getDocValue()); - } - - public void testGetNextValueWithInvalidDocId() { - SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); - SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); - iterator.setDocId(DocIdSetIterator.NO_MORE_DOCS); - - IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { adapter.getNextValue(iterator, 1); }); - - assertEquals("invalid doc id to fetch the next value", exception.getMessage()); - } - - public void testGetNextValueWithUnsupportedIterator() { - DocIdSetIterator mockIterator = mock(DocIdSetIterator.class); - SequentialDocValuesIterator iterator = new 
SequentialDocValuesIterator(mockIterator); - - IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { adapter.getNextValue(iterator, 1); }); - - assertEquals("Unsupported Iterator: " + mockIterator.toString(), exception.getMessage()); - } - - public void testNextDoc() throws IOException { - SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); - SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); - when(mockSortedNumericDocValues.nextDoc()).thenReturn(2, 3, DocIdSetIterator.NO_MORE_DOCS); - when(mockSortedNumericDocValues.nextValue()).thenReturn(42L, 32L); - - int nextDocId = adapter.nextDoc(iterator, 1); - assertEquals(2, nextDocId); - assertEquals(Long.valueOf(42L), adapter.getNextValue(iterator, nextDocId)); - - nextDocId = adapter.nextDoc(iterator, 2); - assertEquals(3, nextDocId); - when(mockSortedNumericDocValues.nextValue()).thenReturn(42L, 32L); - - } - - public void testNextDoc_noMoreDocs() throws IOException { - SortedNumericDocValues mockSortedNumericDocValues = mock(SortedNumericDocValues.class); - SequentialDocValuesIterator iterator = new SequentialDocValuesIterator(mockSortedNumericDocValues); - when(mockSortedNumericDocValues.nextDoc()).thenReturn(2, DocIdSetIterator.NO_MORE_DOCS); - when(mockSortedNumericDocValues.nextValue()).thenReturn(42L, 32L); - - int nextDocId = adapter.nextDoc(iterator, 1); - assertEquals(2, nextDocId); - assertEquals(Long.valueOf(42L), adapter.getNextValue(iterator, nextDocId)); - - assertThrows(IllegalStateException.class, () -> adapter.nextDoc(iterator, 2)); - - } -} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java deleted file mode 100644 index 1aba67533d52e..0000000000000 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeValuesIteratorFactoryTests.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
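The two deleted test classes above exercised the same Lucene iteration contract that their replacement (SequentialDocValuesIteratorTests, further below) still relies on: a consumer advances with nextDoc() until NO_MORE_DOCS and reads a value only once the iterator is positioned on a document. A hedged sketch of that contract, assuming lucene-core on the classpath; the tiny in-memory iterator is invented for illustration and is not an OpenSearch or Lucene class.

    import java.io.IOException;

    import org.apache.lucene.search.DocIdSetIterator;

    public class DocIdIterationSketch {
        public static void main(String[] args) throws IOException {
            final int[] docs = { 2, 3, 7 };
            DocIdSetIterator it = new DocIdSetIterator() {
                private int idx = -1;

                @Override
                public int docID() {
                    if (idx < 0) return -1; // not yet positioned
                    return idx < docs.length ? docs[idx] : NO_MORE_DOCS;
                }

                @Override
                public int nextDoc() {
                    idx++;
                    return docID();
                }

                @Override
                public int advance(int target) throws IOException {
                    int doc = docID();
                    while (doc < target) {
                        doc = nextDoc();
                    }
                    return doc;
                }

                @Override
                public long cost() {
                    return docs.length;
                }
            };
            // the pattern the tests assert: advance first, then read per-doc values
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                System.out.println("positioned on doc " + doc);
            }
        }
    }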
- */ - -package org.opensearch.index.compositeindex.datacube.startree.builder; - -import org.apache.lucene.codecs.DocValuesProducer; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.VectorEncoding; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.DocIdSetIterator; -import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.util.Collections; - -import org.mockito.Mockito; - -import static org.mockito.Mockito.when; - -public class StarTreeValuesIteratorFactoryTests extends OpenSearchTestCase { - - private static StarTreeDocValuesIteratorAdapter starTreeDocValuesIteratorAdapter; - private static FieldInfo mockFieldInfo; - - @BeforeClass - public static void setup() { - starTreeDocValuesIteratorAdapter = new StarTreeDocValuesIteratorAdapter(); - mockFieldInfo = new FieldInfo( - "field", - 1, - false, - false, - true, - IndexOptions.NONE, - DocValuesType.NONE, - -1, - Collections.emptyMap(), - 0, - 0, - 0, - 0, - VectorEncoding.FLOAT32, - VectorSimilarityFunction.EUCLIDEAN, - false, - false - ); - } - - public void testCreateIterator_SortedNumeric() throws IOException { - DocValuesProducer producer = Mockito.mock(DocValuesProducer.class); - SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); - when(producer.getSortedNumeric(mockFieldInfo)).thenReturn(iterator); - SequentialDocValuesIterator result = starTreeDocValuesIteratorAdapter.getDocValuesIterator( - DocValuesType.SORTED_NUMERIC, - mockFieldInfo, - producer - ); - assertEquals(iterator.getClass(), result.getDocIdSetIterator().getClass()); - } - - public void testCreateIterator_UnsupportedType() { - DocValuesProducer producer = Mockito.mock(DocValuesProducer.class); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { - starTreeDocValuesIteratorAdapter.getDocValuesIterator(DocValuesType.BINARY, mockFieldInfo, producer); - }); - assertEquals("Unsupported DocValuesType: BINARY", exception.getMessage()); - } - - public void testGetNextValue_SortedNumeric() throws IOException { - SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); - when(iterator.nextDoc()).thenReturn(0); - when(iterator.nextValue()).thenReturn(123L); - SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); - sequentialDocValuesIterator.getDocIdSetIterator().nextDoc(); - long result = starTreeDocValuesIteratorAdapter.getNextValue(sequentialDocValuesIterator, 0); - assertEquals(123L, result); - } - - public void testGetNextValue_UnsupportedIterator() { - DocIdSetIterator iterator = Mockito.mock(DocIdSetIterator.class); - SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); - - IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { - starTreeDocValuesIteratorAdapter.getNextValue(sequentialDocValuesIterator, 0); - }); - assertEquals("Unsupported Iterator: " + iterator.toString(), exception.getMessage()); - } - - public void testNextDoc() throws IOException { - SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); - SequentialDocValuesIterator sequentialDocValuesIterator = new 
SequentialDocValuesIterator(iterator); - when(iterator.nextDoc()).thenReturn(5); - - int result = starTreeDocValuesIteratorAdapter.nextDoc(sequentialDocValuesIterator, 5); - assertEquals(5, result); - } - - public void test_multipleCoordinatedDocumentReader() throws IOException { - SortedNumericDocValues iterator1 = Mockito.mock(SortedNumericDocValues.class); - SortedNumericDocValues iterator2 = Mockito.mock(SortedNumericDocValues.class); - - SequentialDocValuesIterator sequentialDocValuesIterator1 = new SequentialDocValuesIterator(iterator1); - SequentialDocValuesIterator sequentialDocValuesIterator2 = new SequentialDocValuesIterator(iterator2); - - when(iterator1.nextDoc()).thenReturn(0); - when(iterator2.nextDoc()).thenReturn(1); - - when(iterator1.nextValue()).thenReturn(9L); - when(iterator2.nextValue()).thenReturn(9L); - - starTreeDocValuesIteratorAdapter.nextDoc(sequentialDocValuesIterator1, 0); - starTreeDocValuesIteratorAdapter.nextDoc(sequentialDocValuesIterator2, 0); - assertEquals(0, sequentialDocValuesIterator1.getDocId()); - assertEquals(9L, (long) sequentialDocValuesIterator1.getDocValue()); - assertNotEquals(0, sequentialDocValuesIterator2.getDocId()); - assertEquals(1, sequentialDocValuesIterator2.getDocId()); - assertEquals(9L, (long) sequentialDocValuesIterator2.getDocValue()); - - } - -} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java index 518c6729c2e1a..564ab110fa7a5 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java @@ -88,16 +88,16 @@ public void setUp() throws Exception { public void test_buildWithNoStarTreeFields() throws IOException { when(mapperService.getCompositeFieldTypes()).thenReturn(new HashSet<>()); - StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); - starTreesBuilder.build(); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService); + starTreesBuilder.build(fieldProducerMap); verifyNoInteractions(docValuesProducer); } public void test_getStarTreeBuilder() throws IOException { when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); - StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); - StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(starTreeField, fieldProducerMap, segmentWriteState, mapperService); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService); + StarTreeBuilder starTreeBuilder = starTreesBuilder.getSingleTreeBuilder(starTreeField, segmentWriteState, mapperService); assertTrue(starTreeBuilder instanceof OnHeapStarTreeBuilder); } @@ -105,8 +105,8 @@ public void test_getStarTreeBuilder_illegalArgument() { when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(1, new HashSet<>(), StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP); StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration); - StarTreesBuilder starTreesBuilder = new 
StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); - assertThrows(IllegalArgumentException.class, () -> starTreesBuilder.getStarTreeBuilder(starTreeField, fieldProducerMap, segmentWriteState, mapperService)); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService); + assertThrows(IllegalArgumentException.class, () -> starTreesBuilder.getSingleTreeBuilder(starTreeField, segmentWriteState, mapperService)); } public void test_closeWithNoStarTreeFields() throws IOException { @@ -118,7 +118,7 @@ public void test_closeWithNoStarTreeFields() throws IOException { StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration); starTreeFieldType = new StarTreeMapper.StarTreeFieldType("star_tree", starTreeField); when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); - StarTreesBuilder starTreesBuilder = new StarTreesBuilder(fieldProducerMap, segmentWriteState, mapperService); + StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService); starTreesBuilder.close(); verifyNoInteractions(docValuesProducer); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java index 76b612e3677f7..dfc83125b2806 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java @@ -8,39 +8,126 @@ package org.opensearch.index.compositeindex.datacube.startree.utils; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.SortedNumericDocValues; -import org.opensearch.index.fielddata.AbstractNumericDocValues; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.BytesRef; import org.opensearch.test.OpenSearchTestCase; +import org.junit.BeforeClass; import java.io.IOException; +import java.util.Collections; + +import org.mockito.Mockito; + +import static org.mockito.Mockito.when; public class SequentialDocValuesIteratorTests extends OpenSearchTestCase { - public void test_sequentialDocValuesIterator() { - SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(new AbstractNumericDocValues() { - @Override - public long longValue() throws IOException { - return 0; - } - - @Override - public boolean advanceExact(int i) throws IOException { - return false; - } - - @Override - public int docID() { - return 0; - } + private static FieldInfo mockFieldInfo; + + @BeforeClass + public static void setup() { + mockFieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + } + + public void testCreateIterator_SortedNumeric() throws IOException { + DocValuesProducer producer = Mockito.mock(DocValuesProducer.class); + 
SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); + when(producer.getSortedNumeric(mockFieldInfo)).thenReturn(iterator); + SequentialDocValuesIterator result = new SequentialDocValuesIterator(producer.getSortedNumeric(mockFieldInfo)); + assertEquals(iterator.getClass(), result.getDocIdSetIterator().getClass()); + } + + public void testCreateIterator_UnsupportedType() throws IOException { + DocValuesProducer producer = Mockito.mock(DocValuesProducer.class); + BinaryDocValues iterator = Mockito.mock(BinaryDocValues.class); + when(producer.getBinary(mockFieldInfo)).thenReturn(iterator); + SequentialDocValuesIterator result = new SequentialDocValuesIterator(producer.getBinary(mockFieldInfo)); + assertEquals(iterator.getClass(), result.getDocIdSetIterator().getClass()); + when(iterator.nextDoc()).thenReturn(0); + when(iterator.binaryValue()).thenReturn(new BytesRef("123")); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + result.nextDoc(0); + result.value(0); }); + assertEquals("Unsupported Iterator requested for SequentialDocValuesIterator", exception.getMessage()); + } + + public void testGetNextValue_SortedNumeric() throws IOException { + SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); + when(iterator.nextDoc()).thenReturn(0); + when(iterator.nextValue()).thenReturn(123L); + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); + sequentialDocValuesIterator.nextDoc(0); + long result = sequentialDocValuesIterator.value(0); + assertEquals(123L, result); + } + + public void testGetNextValue_UnsupportedIterator() { + DocIdSetIterator iterator = Mockito.mock(DocIdSetIterator.class); + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { sequentialDocValuesIterator.value(0); }); + assertEquals("Unsupported Iterator requested for SequentialDocValuesIterator", exception.getMessage()); + } + + public void testNextDoc() throws IOException { + SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class); + SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator); + when(iterator.nextDoc()).thenReturn(5); - assertTrue(sequentialDocValuesIterator.getDocIdSetIterator() instanceof AbstractNumericDocValues); - assertEquals(sequentialDocValuesIterator.getDocId(), 0); + int result = sequentialDocValuesIterator.nextDoc(5); + assertEquals(5, result); } - public void test_sequentialDocValuesIterator_default() { - SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(); - assertTrue(sequentialDocValuesIterator.getDocIdSetIterator() instanceof SortedNumericDocValues); + public void test_multipleCoordinatedDocumentReader() throws IOException { + SortedNumericDocValues iterator1 = Mockito.mock(SortedNumericDocValues.class); + SortedNumericDocValues iterator2 = Mockito.mock(SortedNumericDocValues.class); + + SequentialDocValuesIterator sequentialDocValuesIterator1 = new SequentialDocValuesIterator(iterator1); + SequentialDocValuesIterator sequentialDocValuesIterator2 = new SequentialDocValuesIterator(iterator2); + + when(iterator1.nextDoc()).thenReturn(0); + when(iterator2.nextDoc()).thenReturn(1); + + when(iterator1.nextValue()).thenReturn(9L); + when(iterator2.nextValue()).thenReturn(9L); + + sequentialDocValuesIterator1.nextDoc(0); + 
sequentialDocValuesIterator2.nextDoc(0); + assertEquals(0, sequentialDocValuesIterator1.getDocId()); + assertEquals(9L, (long) sequentialDocValuesIterator1.value(0)); + assertNull(sequentialDocValuesIterator2.value(0)); + assertNotEquals(0, sequentialDocValuesIterator2.getDocId()); + assertEquals(1, sequentialDocValuesIterator2.getDocId()); + assertEquals(9L, (long) sequentialDocValuesIterator2.value(1)); + } } diff --git a/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java index 108492c1cf8f9..302180fcf95df 100644 --- a/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java @@ -38,6 +38,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; +import org.opensearch.index.analysis.AnalysisTestsHelper; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.DocumentMapperParser; @@ -46,6 +47,7 @@ import org.opensearch.index.similarity.SimilarityService; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.test.IndexSettingsModule; import java.io.IOException; @@ -97,6 +99,38 @@ public static MapperService newMapperService( ); } + public static MapperService newMapperServiceWithHelperAnalyzer( + NamedXContentRegistry xContentRegistry, + Path tempDir, + Settings settings, + IndicesModule indicesModule, + String indexName + ) throws IOException { + Settings.Builder settingsBuilder = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), tempDir).put(settings); + if (settings.get(IndexMetadata.SETTING_VERSION_CREATED) == null) { + settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT); + } + Settings finalSettings = settingsBuilder.build(); + MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings); + IndexAnalyzers indexAnalyzers = createMockTestAnalysis(finalSettings); + SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); + return new MapperService( + indexSettings, + indexAnalyzers, + xContentRegistry, + similarityService, + mapperRegistry, + () -> null, + () -> false, + null + ); + } + + public static IndexAnalyzers createMockTestAnalysis(Settings nodeSettings, AnalysisPlugin... analysisPlugins) throws IOException { + return AnalysisTestsHelper.createTestAnalysisFromSettings(nodeSettings, analysisPlugins).indexAnalyzers; + } + public static void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... 
conflicts) throws IOException { DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1)); From e749424db053ad31db1c4f1ab9374251ca9b737d Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Tue, 23 Jul 2024 20:24:35 -0700 Subject: [PATCH 09/68] Security fixes and updates (#14928) Signed-off-by: Rishabh Singh --- .github/workflows/add-performance-comment.yml | 5 ++- .github/workflows/benchmark-pull-request.yml | 34 +++++++++---------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/.github/workflows/add-performance-comment.yml b/.github/workflows/add-performance-comment.yml index b522d348c84b2..fc272714c5628 100644 --- a/.github/workflows/add-performance-comment.yml +++ b/.github/workflows/add-performance-comment.yml @@ -6,7 +6,10 @@ on: jobs: add-comment: - if: github.event.label.name == 'Performance' + if: | + github.event.label.name == 'Performance' || + github.event.label.name == 'Search:Performance' || + github.event.label.name == 'Indexing:Performance' runs-on: ubuntu-latest permissions: pull-requests: write diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 9d83331e81d5a..47abcc1178572 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -77,18 +77,6 @@ jobs: run: | echo "Invalid comment format detected. Failing the workflow." exit 1 - - id: get_approvers - run: | - echo "approvers=$(cat .github/CODEOWNERS | grep '^\*' | tr -d '* ' | sed 's/@/,/g' | sed 's/,//1')" >> $GITHUB_OUTPUT - - uses: trstringer/manual-approval@v1 - if: (!contains(steps.get_approvers.outputs.approvers, github.event.comment.user.login)) - with: - secret: ${{ github.TOKEN }} - approvers: ${{ steps.get_approvers.outputs.approvers }} - minimum-approvals: 1 - issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}' - issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}" - exclude-workflow-initiator-as-approver: false - name: Get PR Details id: get_pr uses: actions/github-script@v7 @@ -106,21 +94,33 @@ jobs: return { "headRepoFullName": pull_request.head.repo.full_name, - "headRef": pull_request.head.ref + "headRefSha": pull_request.head.sha }; - name: Set pr details env vars run: | echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRepoFullName' - echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRef' + echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRefSha' headRepo=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRepoFullName') - headRef=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRef') + headRefSha=$(echo '${{ steps.get_pr.outputs.result }}' | jq -r '.headRefSha') echo "prHeadRepo=$headRepo" >> $GITHUB_ENV - echo "prHeadRef=$headRef" >> $GITHUB_ENV + echo "prHeadRefSha=$headRefSha" >> $GITHUB_ENV + - id: get_approvers + run: | + echo "approvers=$(cat .github/CODEOWNERS | grep '^\*' | tr -d '* ' | sed 's/@/,/g' | sed 's/,//1')" >> $GITHUB_OUTPUT + - uses: trstringer/manual-approval@v1 + if: (!contains(steps.get_approvers.outputs.approvers, github.event.comment.user.login)) + with: + secret: ${{ github.TOKEN }} + approvers: ${{ steps.get_approvers.outputs.approvers }} + minimum-approvals: 1 + issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}' + issue-body: "Please approve or deny the benchmark run for PR #${{ env.PR_NUMBER }}" + exclude-workflow-initiator-as-approver: false - name: Checkout PR Repo uses: actions/checkout@v4 with: 
repository: ${{ env.prHeadRepo }} - ref: ${{ env.prHeadRef }} + ref: ${{ env.prHeadRefSha }} token: ${{ secrets.GITHUB_TOKEN }} - name: Setup Java uses: actions/setup-java@v1 From 2def4fd302b71a6d3ed2ce3efc3cce6800fbdd3f Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:36:06 +0530 Subject: [PATCH 10/68] Create new IndexInput for multi part upload (#14888) * Create new IndexInput for multi part upload Signed-off-by: Sooraj Sinha --- .../transfer/BlobStoreTransferService.java | 35 ++++++++-------- .../blobstore/ChecksumBlobStoreFormat.java | 35 ++++++++-------- .../blobstore/ConfigBlobStoreFormat.java | 40 +++++++++++-------- .../BlobStoreTransferServiceTests.java | 30 +++++++++++++- 4 files changed, 86 insertions(+), 54 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index d55abb40dec48..22bb4cf0514bf 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -131,20 +131,18 @@ public void uploadBlob( } final String resourceDescription = "BlobStoreTransferService.uploadBlob(blob=\"" + fileName + "\")"; byte[] bytes = inputStream.readAllBytes(); - try (IndexInput input = new ByteArrayIndexInput(resourceDescription, bytes)) { - long expectedChecksum = computeChecksum(input, resourceDescription); - uploadBlobAsyncInternal( - fileName, - fileName, - bytes.length, - blobPath, - writePriority, - (size, position) -> new OffsetRangeIndexInputStream(input, size, position), - expectedChecksum, - listener, - null - ); - } + long expectedChecksum = computeChecksum(bytes, resourceDescription); + uploadBlobAsyncInternal( + fileName, + fileName, + bytes.length, + blobPath, + writePriority, + (size, position) -> new OffsetRangeIndexInputStream(new ByteArrayIndexInput(resourceDescription, bytes), size, position), + expectedChecksum, + listener, + null + ); } // Builds a metadata map containing the Base64-encoded checkpoint file data associated with a translog file. 
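The BlobStoreTransferService change above swaps one shared IndexInput for a supplier that opens a fresh ByteArrayIndexInput per requested (size, position) range, so every upload part reads through its own cursor; the new test later in this patch asserts exactly that by comparing the file pointers of two supplied streams. A minimal sketch of why per-part streams matter, using plain java.io stand-ins; PartStreamSupplier and PerPartStreamSketch are invented names, not OpenSearch types.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class PerPartStreamSketch {

        // stand-in for the supplier shape a multipart upload calls once per part
        interface PartStreamSupplier {
            InputStream get(long size, long position) throws IOException;
        }

        public static void main(String[] args) throws IOException {
            byte[] blob = "0123456789".getBytes();
            // a fresh stream per part gives each part a private cursor; a single
            // shared stream would let concurrently uploaded parts race on position
            PartStreamSupplier supplier = (size, position) -> {
                InputStream in = new ByteArrayInputStream(blob);
                in.skip(position); // position this part's own cursor
                return in;
            };
            InputStream part1 = supplier.get(5, 0);
            InputStream part2 = supplier.get(5, 5);
            System.out.println((char) part1.read()); // '0' -- unaffected by part2
            System.out.println((char) part2.read()); // '5'
        }
    }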
@@ -220,7 +218,8 @@ private void uploadBlob( } - private void uploadBlobAsyncInternal( + // package private for testing + void uploadBlobAsyncInternal( String fileName, String remoteFileName, long contentLength, @@ -335,10 +334,10 @@ public void listAllInSortedOrderAsync( threadPool.executor(threadpoolName).execute(() -> { listAllInSortedOrder(path, filenamePrefix, limit, listener); }); } - private static long computeChecksum(IndexInput indexInput, String resourceDescription) throws ChecksumCombinationException { + private static long computeChecksum(byte[] bytes, String resourceDescription) throws ChecksumCombinationException { long expectedChecksum; - try { - expectedChecksum = checksumOfChecksum(indexInput.clone(), CHECKSUM_BYTES_LENGTH); + try (IndexInput indexInput = new ByteArrayIndexInput(resourceDescription, bytes)) { + expectedChecksum = checksumOfChecksum(indexInput, CHECKSUM_BYTES_LENGTH); } catch (Exception e) { throw new ChecksumCombinationException( "Potentially corrupted file: Checksum combination failed while combining stored checksum " diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index e567e1b626c5a..3a49fed4be282 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -223,10 +223,11 @@ private void writeAsyncWithPriority( return; } final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor, params); + final BytesReference bytesReference = serialize(obj, blobName, compressor, params); final String resourceDescription = "ChecksumBlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")"; - try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { - long expectedChecksum; + byte[] bytes = BytesReference.toBytes(bytesReference); + long expectedChecksum; + try (IndexInput input = new ByteArrayIndexInput(resourceDescription, bytes)) { try { expectedChecksum = checksumOfChecksum(input.clone(), 8); } catch (Exception e) { @@ -237,21 +238,21 @@ private void writeAsyncWithPriority( e ); } + } - try ( - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - blobName, - blobName, - bytes.length(), - true, - priority, - (size, position) -> new OffsetRangeIndexInputStream(input, size, position), - expectedChecksum, - ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() - ) - ) { - ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); - } + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + blobName, + blobName, + bytes.length, + true, + priority, + (size, position) -> new OffsetRangeIndexInputStream(new ByteArrayIndexInput(resourceDescription, bytes), size, position), + expectedChecksum, + ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() + ) + ) { + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java index 18c718ca2110e..8127bf8c2a2a2 100644 --- 
a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java @@ -8,7 +8,6 @@ package org.opensearch.repositories.blobstore; -import org.apache.lucene.store.IndexInput; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.stream.write.WritePriority; @@ -51,23 +50,30 @@ public void writeAsyncWithUrgentPriority(T obj, BlobContainer blobContainer, Str return; } String blobName = blobName(name); - BytesReference bytes = serialize(obj, blobName, new NoneCompressor(), ToXContent.EMPTY_PARAMS, XContentType.JSON, null, null); + BytesReference bytesReference = serialize( + obj, + blobName, + new NoneCompressor(), + ToXContent.EMPTY_PARAMS, + XContentType.JSON, + null, + null + ); String resourceDescription = "BlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")"; - try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { - try ( - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - blobName, - blobName, - bytes.length(), - true, - WritePriority.URGENT, - (size, position) -> new OffsetRangeIndexInputStream(input, size, position), - null, - false - ) - ) { - ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); - } + byte[] bytes = BytesReference.toBytes(bytesReference); + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + blobName, + blobName, + bytes.length, + true, + WritePriority.URGENT, + (size, position) -> new OffsetRangeIndexInputStream(new ByteArrayIndexInput(resourceDescription, bytes), size, position), + null, + false + ) + ) { + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); } } } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java index cd78aead80923..10e4cc6cfb1ef 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java @@ -22,6 +22,8 @@ import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; @@ -54,9 +56,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class BlobStoreTransferServiceTests extends OpenSearchTestCase { @@ -139,8 +145,28 @@ public void 
testUploadBlobFromInputStreamAsyncFSRepo() throws IOException, Inter FsBlobStore fsBlobStore = mock(FsBlobStore.class); when(fsBlobStore.blobContainer(any())).thenReturn(mockAsyncFsContainer); - TransferService transferService = new BlobStoreTransferService(fsBlobStore, threadPool); - uploadBlobFromInputStream(transferService); + BlobStoreTransferService transferServiceSpy = Mockito.spy(new BlobStoreTransferService(fsBlobStore, threadPool)); + uploadBlobFromInputStream(transferServiceSpy); + + ArgumentCaptor inputStreamCaptor = ArgumentCaptor.forClass( + RemoteTransferContainer.OffsetRangeInputStreamSupplier.class + ); + verify(transferServiceSpy).uploadBlobAsyncInternal( + Mockito.anyString(), + Mockito.anyString(), + Mockito.anyLong(), + Mockito.any(), + Mockito.any(), + inputStreamCaptor.capture(), + Mockito.anyLong(), + Mockito.any(), + Mockito.any() + ); + RemoteTransferContainer.OffsetRangeInputStreamSupplier inputStreamSupplier = inputStreamCaptor.getValue(); + OffsetRangeInputStream inputStream1 = inputStreamSupplier.get(1, 0); + OffsetRangeInputStream inputStream2 = inputStreamSupplier.get(1, 2); + assertNotEquals(inputStream1, inputStream2); + assertNotEquals(inputStream1.getFilePointer(), inputStream2.getFilePointer()); } private IndexMetadata getIndexMetadata() { From 7673a7733ccecc8730e8a3ecff898b72dc3deaa6 Mon Sep 17 00:00:00 2001 From: Pranshu Shukla <55992439+Pranshu-S@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:54:22 +0530 Subject: [PATCH 11/68] Updating Cluster Stats Optimisation Versions to 2.16 (#14914) * Updating Cluster Stats Optimisation Versions to 2.16 Signed-off-by: Pranshu Shukla --- .../action/admin/cluster/stats/ClusterStatsNodeResponse.java | 4 ++-- .../action/admin/cluster/stats/ClusterStatsRequest.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 133cf68f5f8c9..6ed3ca7c409e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -77,7 +77,7 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_16_0)) { this.shardsStats = in.readOptionalArray(ShardStats::new, ShardStats[]::new); this.aggregatedNodeLevelStats = in.readOptionalWriteable(AggregatedNodeLevelStats::new); } else { @@ -156,7 +156,7 @@ public void writeTo(StreamOutput out) throws IOException { } nodeInfo.writeTo(out); nodeStats.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_16_0)) { if (aggregatedNodeLevelStats != null) { out.writeOptionalArray(null); out.writeOptionalWriteable(aggregatedNodeLevelStats); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java index fdeb82a3466f2..bd75b2210e474 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -50,7 +50,7 @@ public class ClusterStatsRequest extends 
BaseNodesRequest { public ClusterStatsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_16_0)) { useAggregatedNodeLevelResponses = in.readOptionalBoolean(); } } @@ -76,7 +76,7 @@ public void useAggregatedNodeLevelResponses(boolean useAggregatedNodeLevelRespon @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_16_0)) { out.writeOptionalBoolean(useAggregatedNodeLevelResponses); } } From 5744eae80dfe466397f4254acf995794855db370 Mon Sep 17 00:00:00 2001 From: shailendra0811 <167273922+shailendra0811@users.noreply.github.com> Date: Wed, 24 Jul 2024 14:59:30 +0530 Subject: [PATCH 12/68] Fix read/write method for Diff Manifest in case Shard diff file is null. (#14938) Signed-off-by: Shailendra Singh --- .../gateway/remote/ClusterStateDiffManifest.java | 8 ++++---- .../opensearch/gateway/remote/RemotePersistenceStats.java | 4 ++-- .../remote/RemoteClusterStateCleanupManagerTests.java | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java index ab7fa1fddf4bf..a3b36ddcff1a7 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java @@ -129,7 +129,6 @@ public ClusterStateDiffManifest( clusterStateCustomUpdated = new ArrayList<>(clusterStateCustomDiff.getDiffs().keySet()); clusterStateCustomUpdated.addAll(clusterStateCustomDiff.getUpserts().keySet()); clusterStateCustomDeleted = clusterStateCustomDiff.getDeletes(); - List indicie1s = indicesRoutingUpdated; } public ClusterStateDiffManifest( @@ -190,7 +189,7 @@ public ClusterStateDiffManifest(StreamInput in) throws IOException { this.hashesOfConsistentSettingsUpdated = in.readBoolean(); this.clusterStateCustomUpdated = in.readStringList(); this.clusterStateCustomDeleted = in.readStringList(); - this.indicesRoutingDiffPath = in.readString(); + this.indicesRoutingDiffPath = in.readOptionalString(); } @Override @@ -535,7 +534,8 @@ public int hashCode() { indicesRoutingDeleted, hashesOfConsistentSettingsUpdated, clusterStateCustomUpdated, - clusterStateCustomDeleted + clusterStateCustomDeleted, + indicesRoutingDiffPath ); } @@ -562,7 +562,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hashesOfConsistentSettingsUpdated); out.writeStringCollection(clusterStateCustomUpdated); out.writeStringCollection(clusterStateCustomDeleted); - out.writeString(indicesRoutingDiffPath); + out.writeOptionalString(indicesRoutingDiffPath); } /** diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java index efd73e11e46b5..1e7f8f278fb0f 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -51,10 +51,10 @@ public long getIndexRoutingFilesCleanupAttemptFailedCount() { } public void indicesRoutingDiffFileCleanupAttemptFailed() { - indexRoutingFilesCleanupAttemptFailedCount.incrementAndGet(); + indicesRoutingDiffFilesCleanupAttemptFailedCount.incrementAndGet(); } public long 
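The two backports above follow the standard wire-compatibility pattern: a field is written and read only when the peer's version is at least V_2_16_0, and a nullable field (here the shard diff path in ClusterStateDiffManifest) must go through the optional read/write variants so a presence byte precedes the value. A hedged sketch of both ideas using plain DataInput/DataOutput stand-ins; the version constant and method shapes are illustrative, not the OpenSearch StreamInput/StreamOutput API.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class VersionGatedFieldSketch {
        static final int V_2_16_0 = 2_16_00; // invented encoding for the example

        // write the gated, optional field; readFrom must mirror this byte-for-byte
        static void writeTo(DataOutputStream out, int peerVersion, Boolean flag) throws IOException {
            if (peerVersion >= V_2_16_0) {
                out.writeBoolean(flag != null);       // presence byte for the nullable field
                if (flag != null) out.writeBoolean(flag);
            }
        }

        static Boolean readFrom(DataInputStream in, int peerVersion) throws IOException {
            if (peerVersion < V_2_16_0) {
                return null;                          // older peers never sent the field
            }
            return in.readBoolean() ? in.readBoolean() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writeTo(new DataOutputStream(bytes), V_2_16_0, Boolean.TRUE);
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readFrom(in, V_2_16_0)); // prints: true
        }
    }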
getIndicesRoutingDiffFileCleanupAttemptFailedCount() { - return indexRoutingFilesCleanupAttemptFailedCount.get(); + return indicesRoutingDiffFilesCleanupAttemptFailedCount.get(); } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java index b86f23f3d37aa..920a48f02b99a 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerTests.java @@ -652,7 +652,7 @@ public void testIndicesRoutingDiffFilesCleanupFailureStats() throws Exception { assertEquals(0, remoteClusterStateCleanupManager.getStats().getIndicesRoutingDiffFileCleanupAttemptFailedCount()); }); - doThrow(IOException.class).when(remoteRoutingTableService).deleteStaleIndexRoutingPaths(any()); + doThrow(IOException.class).when(remoteRoutingTableService).deleteStaleIndexRoutingDiffPaths(any()); remoteClusterStateCleanupManager.deleteClusterMetadata(clusterName, clusterUUID, activeBlobs, inactiveBlobs); assertBusy(() -> { // wait for stats to get updated From 2a14c2772cc53bf2941e80c911307eaaacca055d Mon Sep 17 00:00:00 2001 From: Bukhtawar Khan Date: Wed, 24 Jul 2024 17:23:55 +0530 Subject: [PATCH 13/68] Make reroute iteration time-bound for large shard allocations (#14848) * Make reroute iteration time-bound for large shard allocations Signed-off-by: Bukhtawar Khan Co-authored-by: Rishab Nahata --- CHANGELOG.md | 1 + .../gateway/RecoveryFromGatewayIT.java | 128 +++++++++++++++++- .../routing/allocation/AllocationService.java | 5 +- .../allocation/ExistingShardsAllocator.java | 7 +- .../common/settings/ClusterSettings.java | 2 + .../common/util/BatchRunnableExecutor.java | 66 +++++++++ .../util/concurrent/TimeoutAwareRunnable.java | 19 +++ .../gateway/BaseGatewayShardAllocator.java | 21 +++ .../gateway/ShardsBatchGatewayAllocator.java | 86 ++++++++++-- .../ExistingShardsAllocatorTests.java | 118 ++++++++++++++++ .../util/BatchRunnableExecutorTests.java | 97 +++++++++++++ .../gateway/GatewayAllocatorTests.java | 32 +++++ .../PrimaryShardBatchAllocatorTests.java | 47 +++++++ .../ReplicaShardBatchAllocatorTests.java | 27 ++++ .../TestShardBatchGatewayAllocator.java | 5 +- 15 files changed, 645 insertions(+), 16 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java create mode 100644 server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocatorTests.java create mode 100644 server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 6aa3d7a58dda4..edc0ca2732f25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575)) - Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597)) - Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635)) +- Make reroute iteration time-bound for large shard allocations 
([#14848](https://github.com/opensearch-project/OpenSearch/pull/14848)) ### Deprecated - Deprecate batch_size parameter on bulk API ([#14725](https://github.com/opensearch-project/OpenSearch/pull/14725)) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 6296608c64d37..4085cc3890f30 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -769,7 +769,7 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { ensureGreen("test"); } - public void testBatchModeEnabled() throws Exception { + public void testBatchModeEnabledWithoutTimeout() throws Exception { internalCluster().startClusterManagerOnlyNodes( 1, Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true).build() @@ -810,6 +810,132 @@ public void testBatchModeEnabled() throws Exception { assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); } + public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Exception { + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder() + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true) + .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "20s") + .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "20s") + .build() + ); + List dataOnlyNodes = internalCluster().startDataOnlyNodes(2); + createIndex( + "test", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + ensureGreen("test"); + Settings node0DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(0)); + Settings node1DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(1)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(1))); + ensureRed("test"); + ensureStableCluster(1); + + logger.info("--> Now do a protective reroute"); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + ShardsBatchGatewayAllocator gatewayAllocator = internalCluster().getInstance( + ShardsBatchGatewayAllocator.class, + internalCluster().getClusterManagerName() + ); + assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); + assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + + // Now start both data nodes and ensure batch mode is working + logger.info("--> restarting the stopped nodes"); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); + ensureStableCluster(3); + ensureGreen("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, 
gatewayAllocator.getNumberOfInFlightFetches()); + } + + public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws Exception { + + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true).build() + ); + List dataOnlyNodes = internalCluster().startDataOnlyNodes(2); + createNIndices(50, "test"); // this will create 50p, 50r shards + ensureStableCluster(3); + IndicesStatsResponse indicesStats = dataNodeClient().admin().indices().prepareStats().get(); + assertThat(indicesStats.getSuccessfulShards(), equalTo(100)); + ClusterHealthResponse health = client().admin() + .cluster() + .health(Requests.clusterHealthRequest().waitForGreenStatus().timeout("1m")) + .actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(GREEN, health.getStatus()); + + String clusterManagerName = internalCluster().getClusterManagerName(); + Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(clusterManagerName); + Settings node0DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(0)); + Settings node1DataPathSettings = internalCluster().dataPathSettings(dataOnlyNodes.get(1)); + + internalCluster().stopCurrentClusterManagerNode(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataOnlyNodes.get(1))); + + // Now start cluster manager node and post that verify batches created + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder() + .put("node.name", clusterManagerName) + .put(clusterManagerDataPathSettings) + .put(ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE.getKey(), 5) + .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms") + .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms") + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true) + .build() + ); + ensureStableCluster(1); + + logger.info("--> Now do a protective reroute"); // to avoid any race condition in test + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + ShardsBatchGatewayAllocator gatewayAllocator = internalCluster().getInstance( + ShardsBatchGatewayAllocator.class, + internalCluster().getClusterManagerName() + ); + + assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); + assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(10, gatewayAllocator.getNumberOfStoreShardBatches()); + health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(RED, health.getStatus()); + assertEquals(100, health.getUnassignedShards()); + assertEquals(0, health.getInitializingShards()); + assertEquals(0, health.getActiveShards()); + assertEquals(0, health.getRelocatingShards()); + assertEquals(0, health.getNumberOfDataNodes()); + + // Now start both data nodes and ensure batch mode is working + logger.info("--> restarting the stopped nodes"); + internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + 
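// bring the second stopped data node back as well, again with its original data path, so the timed-out shard batches can finally be allocated
+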
internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); + ensureStableCluster(3); + + // wait for cluster to turn green + health = client().admin().cluster().health(Requests.clusterHealthRequest().waitForGreenStatus().timeout("5m")).actionGet(); + assertFalse(health.isTimedOut()); + assertEquals(GREEN, health.getStatus()); + assertEquals(0, health.getUnassignedShards()); + assertEquals(0, health.getInitializingShards()); + assertEquals(100, health.getActiveShards()); + assertEquals(0, health.getRelocatingShards()); + assertEquals(2, health.getNumberOfDataNodes()); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + } + public void testBatchModeDisabled() throws Exception { internalCluster().startClusterManagerOnlyNodes( 1, diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 5ad3a2fd47ce3..e29a81a2c131f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -72,6 +72,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -617,10 +618,10 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) { private void allocateAllUnassignedShards(RoutingAllocation allocation) { ExistingShardsAllocator allocator = existingShardsAllocators.get(ShardsBatchGatewayAllocator.ALLOCATOR_NAME); - allocator.allocateAllUnassignedShards(allocation, true); + Optional.ofNullable(allocator.allocateAllUnassignedShards(allocation, true)).ifPresent(Runnable::run); allocator.afterPrimariesBeforeReplicas(allocation); // Replicas Assignment - allocator.allocateAllUnassignedShards(allocation, false); + Optional.ofNullable(allocator.allocateAllUnassignedShards(allocation, false)).ifPresent(Runnable::run); } private void disassociateDeadNodes(RoutingAllocation allocation) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java index fb2a37237f8b6..eb7a1e7209c37 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocator.java @@ -41,6 +41,7 @@ import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.ShardsBatchGatewayAllocator; +import java.util.ArrayList; import java.util.List; /** @@ -108,14 +109,16 @@ void allocateUnassigned( * * Allocation service will currently run the default implementation of it implemented by {@link ShardsBatchGatewayAllocator} */ - default void allocateAllUnassignedShards(RoutingAllocation allocation, boolean primary) { + default Runnable allocateAllUnassignedShards(RoutingAllocation allocation, boolean primary) { RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List runnables = new ArrayList<>(); while (iterator.hasNext()) { ShardRouting shardRouting = iterator.next(); if (shardRouting.primary() == primary) { - 
allocateUnassigned(shardRouting, allocation, iterator); + runnables.add(() -> allocateUnassigned(shardRouting, allocation, iterator)); } } + return () -> runnables.forEach(Runnable::run); } /** diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 49801fd3834b8..2f60c731bc554 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -343,6 +343,8 @@ public void apply(Settings value, Settings current, Settings previous) { GatewayService.RECOVER_AFTER_NODES_SETTING, GatewayService.RECOVER_AFTER_TIME_SETTING, ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE, + ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, + ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING, PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD, NetworkModule.HTTP_DEFAULT_TYPE_SETTING, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING, diff --git a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java new file mode 100644 index 0000000000000..d3d3304cb909a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.Randomness; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.TimeoutAwareRunnable; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +/** + * A {@link Runnable} that iteratively executes a batch of {@link TimeoutAwareRunnable}s. If the elapsed time exceeds the timeout defined by {@link TimeValue} timeout, then all subsequent {@link TimeoutAwareRunnable}s will have their {@link TimeoutAwareRunnable#onTimeout} method invoked and will not be run. 
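+ * <p>
+ * A minimal usage sketch (the {@code pendingWork} list and the 20 second budget below are illustrative only, not part of this change):
+ * <pre>{@code
+ * List<TimeoutAwareRunnable> batch = new ArrayList<>(pendingWork); // must be mutable: run() shuffles it in place
+ * new BatchRunnableExecutor(batch, () -> TimeValue.timeValueSeconds(20)).run();
+ * // entries run until 20s have elapsed; later entries only get onTimeout(); a negative timeout disables the bound
+ * }</pre>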
+ *
+ * @opensearch.internal
+ */
+public class BatchRunnableExecutor implements Runnable {
+
+    private final Supplier<TimeValue> timeoutSupplier;
+
+    private final List<TimeoutAwareRunnable> timeoutAwareRunnables;
+
+    private static final Logger logger = LogManager.getLogger(BatchRunnableExecutor.class);
+
+    public BatchRunnableExecutor(List<TimeoutAwareRunnable> timeoutAwareRunnables, Supplier<TimeValue> timeoutSupplier) {
+        this.timeoutSupplier = timeoutSupplier;
+        this.timeoutAwareRunnables = timeoutAwareRunnables;
+    }
+
+    // for tests
+    public List<TimeoutAwareRunnable> getTimeoutAwareRunnables() {
+        return this.timeoutAwareRunnables;
+    }
+
+    @Override
+    public void run() {
+        logger.debug("Starting execution of a batch of [{}] runnables", timeoutAwareRunnables.size());
+        long startTime = System.nanoTime();
+        if (timeoutAwareRunnables.isEmpty()) {
+            return;
+        }
+        Randomness.shuffle(timeoutAwareRunnables);
+        for (TimeoutAwareRunnable runnable : timeoutAwareRunnables) {
+            if (timeoutSupplier.get().nanos() < 0 || System.nanoTime() - startTime < timeoutSupplier.get().nanos()) {
+                runnable.run();
+            } else {
+                logger.debug("Invoking onTimeout for a runnable in a batch of size [{}]", timeoutAwareRunnables.size());
+                runnable.onTimeout();
+            }
+        }
+        logger.debug(
+            "Time taken to execute timed runnables in this cycle: [{}ms]",
+            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)
+        );
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java b/server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java
new file mode 100644
index 0000000000000..8d3357ad93095
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/util/concurrent/TimeoutAwareRunnable.java
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.common.util.concurrent; + +/** + * Runnable that is aware of a timeout + * + * @opensearch.internal + */ +public interface TimeoutAwareRunnable extends Runnable { + + void onTimeout(); +} diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 58982e869794f..0d6af943d39e0 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.AllocationDecision; @@ -43,9 +44,12 @@ import org.opensearch.cluster.routing.allocation.NodeAllocationResult; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.core.index.shard.ShardId; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; /** * An abstract class that implements basic functionality for allocating @@ -78,6 +82,23 @@ public void allocateUnassigned( executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); } + protected void allocateUnassignedBatchOnTimeout(List shardRoutings, RoutingAllocation allocation, boolean primary) { + Set shardIdsFromBatch = new HashSet<>(); + for (ShardRouting shardRouting : shardRoutings) { + ShardId shardId = shardRouting.shardId(); + shardIdsFromBatch.add(shardId); + } + RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + ShardRouting unassignedShard = iterator.next(); + AllocateUnassignedDecision allocationDecision; + if (unassignedShard.primary() == primary && shardIdsFromBatch.contains(unassignedShard.shardId())) { + allocationDecision = AllocateUnassignedDecision.throttle(null); + executeDecision(unassignedShard, allocationDecision, allocation, iterator); + } + } + } + protected void executeDecision( ShardRouting shardRouting, AllocateUnassignedDecision allocateUnassignedDecision, diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 3c0797cd450d2..55f5388d8f454 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -27,9 +27,13 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.inject.Inject; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.BatchRunnableExecutor; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.TimeoutAwareRunnable; import org.opensearch.common.util.set.Sets; import org.opensearch.core.action.ActionListener; import 
org.opensearch.core.index.shard.ShardId; @@ -41,6 +45,7 @@ import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -68,6 +73,14 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { private final long maxBatchSize; private static final short DEFAULT_SHARD_BATCH_SIZE = 2000; + private static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + "cluster.routing.allocation.shards_batch_gateway_allocator.primary_allocator_timeout"; + private static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + "cluster.routing.allocation.shards_batch_gateway_allocator.replica_allocator_timeout"; + + private TimeValue primaryShardsBatchGatewayAllocatorTimeout; + private TimeValue replicaShardsBatchGatewayAllocatorTimeout; + /** * Number of shards we send in one batch to data nodes for fetching metadata */ @@ -79,6 +92,20 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { Setting.Property.NodeScope ); + public static final Setting PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, + TimeValue.MINUS_ONE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, + TimeValue.MINUS_ONE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + private final RerouteService rerouteService; private final PrimaryShardBatchAllocator primaryShardBatchAllocator; private final ReplicaShardBatchAllocator replicaShardBatchAllocator; @@ -97,7 +124,8 @@ public ShardsBatchGatewayAllocator( RerouteService rerouteService, TransportNodesListGatewayStartedShardsBatch batchStartedAction, TransportNodesListShardStoreMetadataBatch batchStoreAction, - Settings settings + Settings settings, + ClusterSettings clusterSettings ) { this.rerouteService = rerouteService; this.primaryShardBatchAllocator = new InternalPrimaryBatchShardAllocator(); @@ -105,6 +133,10 @@ public ShardsBatchGatewayAllocator( this.batchStartedAction = batchStartedAction; this.batchStoreAction = batchStoreAction; this.maxBatchSize = GATEWAY_ALLOCATOR_BATCH_SIZE.get(settings); + this.primaryShardsBatchGatewayAllocatorTimeout = PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setPrimaryBatchAllocatorTimeout); + this.replicaShardsBatchGatewayAllocatorTimeout = REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setReplicaBatchAllocatorTimeout); } @Override @@ -127,7 +159,10 @@ protected ShardsBatchGatewayAllocator(long batchSize) { this.batchStoreAction = null; this.replicaShardBatchAllocator = null; this.maxBatchSize = batchSize; + this.primaryShardsBatchGatewayAllocatorTimeout = null; + this.replicaShardsBatchGatewayAllocatorTimeout = null; } + // for tests @Override @@ -187,14 +222,14 @@ public void allocateUnassigned( } @Override - public void allocateAllUnassignedShards(final RoutingAllocation allocation, boolean primary) { + public BatchRunnableExecutor allocateAllUnassignedShards(final RoutingAllocation allocation, boolean primary) { assert 
primaryShardBatchAllocator != null; assert replicaShardBatchAllocator != null; - innerAllocateUnassignedBatch(allocation, primaryShardBatchAllocator, replicaShardBatchAllocator, primary); + return innerAllocateUnassignedBatch(allocation, primaryShardBatchAllocator, replicaShardBatchAllocator, primary); } - protected void innerAllocateUnassignedBatch( + protected BatchRunnableExecutor innerAllocateUnassignedBatch( RoutingAllocation allocation, PrimaryShardBatchAllocator primaryBatchShardAllocator, ReplicaShardBatchAllocator replicaBatchShardAllocator, @@ -203,20 +238,45 @@ protected void innerAllocateUnassignedBatch( // create batches for unassigned shards Set batchesToAssign = createAndUpdateBatches(allocation, primary); if (batchesToAssign.isEmpty()) { - return; + return null; } + List runnables = new ArrayList<>(); if (primary) { batchIdToStartedShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) - .forEach( - shardsBatch -> primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation) - ); + .forEach(shardsBatch -> runnables.add(new TimeoutAwareRunnable() { + @Override + public void onTimeout() { + primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout( + shardsBatch.getBatchedShardRoutings(), + allocation, + true + ); + } + + @Override + public void run() { + primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation); + } + })); + return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout); } else { batchIdToStoreShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) - .forEach(batch -> replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation)); + .forEach(batch -> runnables.add(new TimeoutAwareRunnable() { + @Override + public void onTimeout() { + replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(batch.getBatchedShardRoutings(), allocation, false); + } + + @Override + public void run() { + replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation); + } + })); + return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout); } } @@ -721,4 +781,12 @@ public int getNumberOfStartedShardBatches() { public int getNumberOfStoreShardBatches() { return batchIdToStoreShardBatch.size(); } + + private void setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatewayAllocatorTimeout) { + this.primaryShardsBatchGatewayAllocatorTimeout = primaryShardsBatchGatewayAllocatorTimeout; + } + + private void setReplicaBatchAllocatorTimeout(TimeValue replicaShardsBatchGatewayAllocatorTimeout) { + this.replicaShardsBatchGatewayAllocatorTimeout = replicaShardsBatchGatewayAllocatorTimeout; + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocatorTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocatorTests.java new file mode 100644 index 0000000000000..1da8f5ef7f695 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ExistingShardsAllocatorTests.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.settings.Settings; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +public class ExistingShardsAllocatorTests extends OpenSearchAllocationTestCase { + + public void testRunnablesExecutedForUnassignedShards() throws InterruptedException { + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(2)) + .build(); + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))) + .build(); + RoutingAllocation allocation = new RoutingAllocation( + yesAllocationDeciders(), + clusterState.getRoutingNodes(), + clusterState, + null, + null, + 0L + ); + CountDownLatch expectedStateLatch = new CountDownLatch(3); + TestAllocator testAllocator = new TestAllocator(expectedStateLatch); + testAllocator.allocateAllUnassignedShards(allocation, true).run(); + // if the below condition is passed, then we are sure runnable executed for all primary shards + assertTrue(expectedStateLatch.await(30, TimeUnit.SECONDS)); + + expectedStateLatch = new CountDownLatch(6); + testAllocator = new TestAllocator(expectedStateLatch); + testAllocator.allocateAllUnassignedShards(allocation, false).run(); + // if the below condition is passed, then we are sure runnable executed for all replica shards + assertTrue(expectedStateLatch.await(30, TimeUnit.SECONDS)); + } + + private static class TestAllocator implements ExistingShardsAllocator { + + final CountDownLatch countDownLatch; + + TestAllocator(CountDownLatch latch) { + this.countDownLatch = latch; + } + + @Override + public void beforeAllocation(RoutingAllocation allocation) { + + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + + } + + @Override + public void allocateUnassigned( + ShardRouting shardRouting, + RoutingAllocation allocation, + UnassignedAllocationHandler unassignedAllocationHandler + ) { + countDownLatch.countDown(); + } + + @Override + public AllocateUnassignedDecision explainUnassignedShardAllocation( + ShardRouting unassignedShard, + RoutingAllocation routingAllocation + ) { + return null; + } + + @Override + public void cleanCaches() { + + } + + @Override + public void applyStartedShards(List startedShards, RoutingAllocation allocation) { + + } + + @Override + public void applyFailedShards(List failedShards, RoutingAllocation allocation) { + + } + + @Override + public int getNumberOfInFlightFetches() { + return 0; + } + } +} diff --git a/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java 
b/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java
new file mode 100644
index 0000000000000..269f89faec54d
--- /dev/null
+++ b/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.util;
+
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.TimeoutAwareRunnable;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class BatchRunnableExecutorTests extends OpenSearchTestCase {
+    private Supplier<TimeValue> timeoutSupplier;
+    private TimeoutAwareRunnable runnable1;
+    private TimeoutAwareRunnable runnable2;
+    private TimeoutAwareRunnable runnable3;
+    private List<TimeoutAwareRunnable> runnableList;
+
+    public void setupRunnables() {
+        timeoutSupplier = mock(Supplier.class);
+        runnable1 = mock(TimeoutAwareRunnable.class);
+        runnable2 = mock(TimeoutAwareRunnable.class);
+        runnable3 = mock(TimeoutAwareRunnable.class);
+        runnableList = Arrays.asList(runnable1, runnable2, runnable3);
+    }
+
+    public void testRunWithoutTimeout() {
+        setupRunnables();
+        timeoutSupplier = () -> TimeValue.timeValueSeconds(1);
+        BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier);
+        executor.run();
+        verify(runnable1, times(1)).run();
+        verify(runnable2, times(1)).run();
+        verify(runnable3, times(1)).run();
+        verify(runnable1, never()).onTimeout();
+        verify(runnable2, never()).onTimeout();
+        verify(runnable3, never()).onTimeout();
+    }
+
+    public void testRunWithTimeout() {
+        setupRunnables();
+        timeoutSupplier = () -> TimeValue.timeValueNanos(1);
+        BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier);
+        executor.run();
+        verify(runnable1, times(1)).onTimeout();
+        verify(runnable2, times(1)).onTimeout();
+        verify(runnable3, times(1)).onTimeout();
+        verify(runnable1, never()).run();
+        verify(runnable2, never()).run();
+        verify(runnable3, never()).run();
+    }
+
+    public void testRunWithPartialTimeout() {
+        setupRunnables();
+        timeoutSupplier = () -> TimeValue.timeValueMillis(50);
+        BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier);
+        doAnswer(invocation -> {
+            Thread.sleep(100);
+            return null;
+        }).when(runnable1).run();
+        executor.run();
+        verify(runnable1, atMost(1)).run();
+        verify(runnable2, atMost(1)).run();
+        verify(runnable3, atMost(1)).run();
+        verify(runnable2, atMost(1)).onTimeout();
+        verify(runnable3, atMost(1)).onTimeout();
+    }
+
+    public void testRunWithEmptyRunnableList() {
+        setupRunnables();
+        BatchRunnableExecutor executor = new BatchRunnableExecutor(Collections.emptyList(), timeoutSupplier);
+        executor.run();
+        verify(runnable1, never()).onTimeout();
+        verify(runnable2, never()).onTimeout();
+        verify(runnable3, never()).onTimeout();
+        verify(runnable1, never()).run();
+        verify(runnable2, never()).run();
+
verify(runnable3, never()).run(); + } +} diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index aa31c710c1fbd..bd56123f6df1f 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -32,6 +32,7 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BatchRunnableExecutor; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.SnapshotShardSizeInfo; @@ -61,6 +62,13 @@ public void setUp() throws Exception { testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(); } + public void testExecutorNotNull() { + createIndexAndUpdateClusterState(1, 3, 1); + createBatchesAndAssert(1); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + assertNotNull(executor); + } + public void testSingleBatchCreation() { createIndexAndUpdateClusterState(1, 3, 1); createBatchesAndAssert(1); @@ -336,6 +344,30 @@ public void testGetBatchIdNonExisting() { allShardRoutings.forEach(shard -> assertNull(testShardsBatchGatewayAllocator.getBatchId(shard, shard.primary()))); } + public void testCreatePrimaryAndReplicaExecutorOfSizeOne() { + createIndexAndUpdateClusterState(1, 3, 2); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + assertEquals(executor.getTimeoutAwareRunnables().size(), 1); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, false); + assertEquals(executor.getTimeoutAwareRunnables().size(), 1); + } + + public void testCreatePrimaryExecutorOfSizeOneAndReplicaExecutorOfSizeZero() { + createIndexAndUpdateClusterState(1, 3, 0); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + assertEquals(executor.getTimeoutAwareRunnables().size(), 1); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, false); + assertNull(executor); + } + + public void testCreatePrimaryAndReplicaExecutorOfSizeTwo() { + createIndexAndUpdateClusterState(2, 1001, 1); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + assertEquals(executor.getTimeoutAwareRunnables().size(), 2); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, false); + assertEquals(executor.getTimeoutAwareRunnables().size(), 2); + } + private void createIndexAndUpdateClusterState(int count, int numberOfShards, int numberOfReplicas) { if (count == 0) return; Metadata.Builder metadata = Metadata.builder(); diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java index 8ad8bcda95f40..270cf465d0f80 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java @@ -41,6 +41,7 @@ import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import 
java.util.HashSet; @@ -256,6 +257,52 @@ public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() { assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED, ignoredShards.get(0).unassignedInfo().getLastAllocationStatus()); } + public void testAllocateUnassignedBatchOnTimeoutWithMatchingPrimaryShards() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); + setUpShards(1); + final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); + ShardRouting shardRouting = routingAllocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); + + List shardRoutings = Arrays.asList(shardRouting); + batchAllocator.allocateUnassignedBatchOnTimeout(shardRoutings, routingAllocation, true); + + List ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); + assertEquals(1, ignoredShards.size()); + assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED, ignoredShards.get(0).unassignedInfo().getLastAllocationStatus()); + } + + public void testAllocateUnassignedBatchOnTimeoutWithNoMatchingPrimaryShards() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); + setUpShards(1); + final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); + List shardRoutings = new ArrayList<>(); + batchAllocator.allocateUnassignedBatchOnTimeout(shardRoutings, routingAllocation, true); + + List ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); + assertEquals(0, ignoredShards.size()); + } + + public void testAllocateUnassignedBatchOnTimeoutWithNonPrimaryShards() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); + setUpShards(1); + final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); + + ShardRouting shardRouting = routingAllocation.routingTable() + .getIndicesRouting() + .get("test") + .shard(shardId.id()) + .replicaShards() + .get(0); + List shardRoutings = Arrays.asList(shardRouting); + batchAllocator.allocateUnassignedBatchOnTimeout(shardRoutings, routingAllocation, false); + + List ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); + assertEquals(1, ignoredShards.size()); + } + private RoutingAllocation routingAllocationWithOnePrimary( AllocationDeciders deciders, UnassignedInfo.Reason reason, diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java index 526a3990955b8..435fd78be2bcd 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java @@ -717,6 +717,33 @@ public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() t 
assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED, allocateUnassignedDecision.getAllocationStatus()); } + public void testAllocateUnassignedBatchOnTimeoutWithUnassignedReplicaShard() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List shards = new ArrayList<>(); + while (iterator.hasNext()) { + shards.add(iterator.next()); + } + testBatchAllocator.allocateUnassignedBatchOnTimeout(shards, allocation, false); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + assertEquals( + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + allocation.routingNodes().unassigned().ignored().get(0).unassignedInfo().getLastAllocationStatus() + ); + } + + public void testAllocateUnassignedBatchOnTimeoutWithAlreadyRecoveringReplicaShard() { + RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + List shards = new ArrayList<>(); + while (iterator.hasNext()) { + shards.add(iterator.next()); + } + testBatchAllocator.allocateUnassignedBatchOnTimeout(shards, allocation, false); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0)); + } + private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders) { return onePrimaryOnNode1And1Replica(deciders, Settings.EMPTY, UnassignedInfo.Reason.CLUSTER_RECOVERED); } diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java index fbb39c284f0ff..0eb4bb6935bac 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java @@ -13,6 +13,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.util.BatchRunnableExecutor; import org.opensearch.core.index.shard.ShardId; import org.opensearch.gateway.AsyncShardFetch; import org.opensearch.gateway.PrimaryShardBatchAllocator; @@ -102,9 +103,9 @@ protected boolean hasInitiatedFetching(ShardRouting shard) { }; @Override - public void allocateAllUnassignedShards(RoutingAllocation allocation, boolean primary) { + public BatchRunnableExecutor allocateAllUnassignedShards(RoutingAllocation allocation, boolean primary) { currentNodes = allocation.nodes(); - innerAllocateUnassignedBatch(allocation, primaryBatchShardAllocator, replicaBatchShardAllocator, primary); + return innerAllocateUnassignedBatch(allocation, primaryBatchShardAllocator, replicaBatchShardAllocator, primary); } @Override From 1fe58b5d712cfef525abfbd2dfaf398c0368745f Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 24 Jul 2024 19:54:04 +0800 Subject: [PATCH 14/68] Fix the documentation url of the Create or Update alias API in rest-api-spec (#14935) Signed-off-by: Gao Binlong --- .../src/main/resources/rest-api-spec/api/indices.put_alias.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json index d99edcf5513f9..14427b00f1bb3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json @@ -1,7 +1,7 @@ { "indices.put_alias":{ "documentation":{ - "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/update-alias/", "description":"Creates or updates an alias." }, "stability":"stable", From c76bfebd49c8129b564edc68ce59f01853dc6722 Mon Sep 17 00:00:00 2001 From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Date: Wed, 24 Jul 2024 18:44:03 +0530 Subject: [PATCH 15/68] Template creation using context (#14811) * Template creation using context Signed-off-by: Mohit Godwani --- CHANGELOG.md | 1 + .../TransportSimulateIndexTemplateAction.java | 3 +- .../post/TransportSimulateTemplateAction.java | 3 +- .../SystemTemplateMetadata.java | 29 +- .../SystemTemplatesService.java | 2 +- .../TemplateRepositoryMetadata.java | 20 + .../coordination/OpenSearchNodeCommand.java | 6 +- .../metadata/ComposableIndexTemplate.java | 45 +- .../opensearch/cluster/metadata/Context.java | 130 +++++ .../opensearch/cluster/metadata/Metadata.java | 48 +- .../MetadataIndexTemplateService.java | 117 ++++- .../SystemTemplatesServiceTests.java | 42 +- .../MetadataIndexTemplateServiceTests.java | 459 +++++++++++++++++- 13 files changed, 862 insertions(+), 43 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/Context.java diff --git a/CHANGELOG.md b/CHANGELOG.md index edc0ca2732f25..00560d68e4051 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Create listener to refresh search thread resource usage ([#14832](https://github.com/opensearch-project/OpenSearch/pull/14832)) - Add rest, transport layer changes for hot to warm tiering - dedicated setup (([#13980](https://github.com/opensearch-project/OpenSearch/pull/13980)) - Optimize Cluster Stats Indices to precomute node level stats ([#14426](https://github.com/opensearch-project/OpenSearch/pull/14426)) +- Add logic to create index templates (v2) using context field ([#14811](https://github.com/opensearch-project/OpenSearch/pull/14811)) ### Dependencies - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442)) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index c1a02d813ffb2..22f1831a54164 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -140,7 +140,8 @@ protected void clusterManagerOperation( MetadataIndexTemplateService.validateV2TemplateRequest( state.metadata(), simulateTemplateToAdd, - request.getIndexTemplateRequest().indexTemplate() + request.getIndexTemplateRequest().indexTemplate(), + clusterService.getClusterSettings() ); stateWithTemplate = indexTemplateService.addIndexTemplateV2( state, diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index 6565896fd3db2..03190445647ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -134,7 +134,8 @@ protected void clusterManagerOperation( MetadataIndexTemplateService.validateV2TemplateRequest( state.metadata(), simulateTemplateToAdd, - request.getIndexTemplateRequest().indexTemplate() + request.getIndexTemplateRequest().indexTemplate(), + clusterService.getClusterSettings() ); stateWithTemplate = indexTemplateService.addIndexTemplateV2( state, diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java index 9bbe27ac0e281..227b70ffa2ef5 100644 --- a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplateMetadata.java @@ -10,6 +10,8 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Objects; + /** * Metadata information about a template available in a template repository. */ @@ -48,13 +50,14 @@ public long version() { * @return Metadata object based on name */ public static SystemTemplateMetadata fromComponentTemplate(String fullyQualifiedName) { - assert fullyQualifiedName.length() > 1 : "System template name must have at least one component"; - assert fullyQualifiedName.substring(1, fullyQualifiedName.indexOf(DELIMITER, 1)).equals(COMPONENT_TEMPLATE_TYPE); + assert fullyQualifiedName.length() > DELIMITER.length() * 3 + 2 + COMPONENT_TEMPLATE_TYPE.length() + : "System template name must have all defined components"; + assert (DELIMITER + fullyQualifiedName.substring(1, fullyQualifiedName.indexOf(DELIMITER, 1))).equals(COMPONENT_TEMPLATE_TYPE); return new SystemTemplateMetadata( - Long.parseLong(fullyQualifiedName.substring(fullyQualifiedName.lastIndexOf(DELIMITER))), + Long.parseLong(fullyQualifiedName.substring(fullyQualifiedName.lastIndexOf(DELIMITER) + 1)), COMPONENT_TEMPLATE_TYPE, - fullyQualifiedName.substring(0, fullyQualifiedName.lastIndexOf(DELIMITER)) + fullyQualifiedName.substring(fullyQualifiedName.indexOf(DELIMITER, 2) + 1, fullyQualifiedName.lastIndexOf(DELIMITER)) ); } @@ -65,4 +68,22 @@ public static SystemTemplateMetadata fromComponentTemplateInfo(String name, long public final String fullyQualifiedName() { return type + DELIMITER + name + DELIMITER + version; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SystemTemplateMetadata that = (SystemTemplateMetadata) o; + return version == that.version && Objects.equals(type, that.type) && Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(version, type, name); + } + + @Override + public String toString() { + return "SystemTemplateMetadata{" + "version=" + version + ", type='" + type + '\'' + ", name='" + name + '\'' + '}'; + } } diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java 
b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java index ccb9272fa57b1..90652192e5c28 100644 --- a/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesService.java @@ -85,7 +85,7 @@ void refreshTemplates(boolean verification) { int failedLoadingRepositories = 0; List exceptions = new ArrayList<>(); - if (loaded.compareAndSet(false, true) && enabledTemplates) { + if ((verification || loaded.compareAndSet(false, true)) && enabledTemplates) { for (SystemTemplatesPlugin plugin : systemTemplatesPluginList) { try (SystemTemplateRepository repository = plugin.loadRepository()) { diff --git a/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java index 7ab4553aade0e..1fa79d291480b 100644 --- a/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/applicationtemplates/TemplateRepositoryMetadata.java @@ -10,6 +10,8 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Objects; + /** * The information to uniquely identify a template repository. */ @@ -31,4 +33,22 @@ public String id() { public long version() { return version; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TemplateRepositoryMetadata that = (TemplateRepositoryMetadata) o; + return version == that.version && Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id, version); + } + + @Override + public String toString() { + return "TemplateRepositoryMetadata{" + "id='" + id + '\'' + ", version=" + version + '}'; + } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java index 259d8961a3e78..896fe6fc8024b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; +import org.opensearch.cluster.metadata.ComponentTemplateMetadata; import org.opensearch.cluster.metadata.DataStreamMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.collect.Tuple; @@ -94,9 +95,10 @@ public abstract class OpenSearchNodeCommand extends EnvironmentAwareCommand { public T parseNamedObject(Class categoryClass, String name, XContentParser parser, C context) throws IOException { // Currently, two unknown top-level objects are present if (Metadata.Custom.class.isAssignableFrom(categoryClass)) { - if (DataStreamMetadata.TYPE.equals(name)) { + if (DataStreamMetadata.TYPE.equals(name) || ComponentTemplateMetadata.TYPE.equals(name)) { // DataStreamMetadata is used inside Metadata class for validation purposes and building the indicesLookup, - // therefor even es node commands need to be able to parse it. + // ComponentTemplateMetadata is used inside Metadata class for building the systemTemplatesLookup, + // therefor even OpenSearch node commands need to be able to parse it. 
return super.parseNamedObject(categoryClass, name, parser, context); // TODO: Try to parse other named objects (e.g. stored scripts, ingest pipelines) that are part of core es as well? // Note that supporting PersistentTasksCustomMetadata is trickier, because PersistentTaskParams is a named object too. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index e7f1b97f28842..594dda83c41e2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.metadata; +import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.DataStream.TimestampField; @@ -75,6 +76,7 @@ public class ComposableIndexTemplate extends AbstractDiffable PARSER = new ConstructingObjectParser<>( @@ -87,7 +89,8 @@ public class ComposableIndexTemplate extends AbstractDiffable) a[5], - (DataStreamTemplate) a[6] + (DataStreamTemplate) a[6], + (Context) a[7] ) ); @@ -99,6 +102,7 @@ public class ComposableIndexTemplate extends AbstractDiffable p.map(), METADATA); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), DataStreamTemplate.PARSER, DATA_STREAM); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Context.PARSER, CONTEXT); } private final List indexPatterns; @@ -114,6 +118,8 @@ public class ComposableIndexTemplate extends AbstractDiffable metadata; @Nullable private final DataStreamTemplate dataStreamTemplate; + @Nullable + private final Context context; static Diff readITV2DiffFrom(StreamInput in) throws IOException { return AbstractDiffable.readDiffFrom(ComposableIndexTemplate::new, in); @@ -131,7 +137,7 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null); } public ComposableIndexTemplate( @@ -142,6 +148,19 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate + ) { + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null); + } + + public ComposableIndexTemplate( + List indexPatterns, + @Nullable Template template, + @Nullable List componentTemplates, + @Nullable Long priority, + @Nullable Long version, + @Nullable Map metadata, + @Nullable DataStreamTemplate dataStreamTemplate, + @Nullable Context context ) { this.indexPatterns = indexPatterns; this.template = template; @@ -150,6 +169,7 @@ public ComposableIndexTemplate( this.version = version; this.metadata = metadata; this.dataStreamTemplate = dataStreamTemplate; + this.context = context; } public ComposableIndexTemplate(StreamInput in) throws IOException { @@ -164,6 +184,11 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.version = in.readOptionalVLong(); this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.context = in.readOptionalWriteable(Context::new); + } else { + this.context = null; + } } public List indexPatterns() { @@ -205,6 +230,10 @@ public 
DataStreamTemplate getDataStreamTemplate() { return dataStreamTemplate; } + public Context context() { + return context; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(this.indexPatterns); @@ -219,6 +248,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(this.version); out.writeMap(this.metadata); out.writeOptionalWriteable(dataStreamTemplate); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(context); + } } @Override @@ -243,6 +275,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (this.dataStreamTemplate != null) { builder.field(DATA_STREAM.getPreferredName(), dataStreamTemplate); } + if (this.context != null) { + builder.field(CONTEXT.getPreferredName(), context); + } builder.endObject(); return builder; } @@ -256,7 +291,8 @@ public int hashCode() { this.priority, this.version, this.metadata, - this.dataStreamTemplate + this.dataStreamTemplate, + this.context ); } @@ -275,7 +311,8 @@ public boolean equals(Object obj) { && Objects.equals(this.priority, other.priority) && Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) - && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate); + && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate) + && Objects.equals(this.context, other.context); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Context.java b/server/src/main/java/org/opensearch/cluster/metadata/Context.java new file mode 100644 index 0000000000000..4bd6134e8a318 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/Context.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +/** + * Class encapsulating the context metadata associated with an index template/index. 
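+ * <p>
+ * For illustration, a context serialized by this class takes the following shape (the name and params below are hypothetical):
+ * <pre>
+ * {
+ *   "name": "my-context",
+ *   "version": "_latest",
+ *   "params": { "some-param": "some-value" }
+ * }
+ * </pre>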
+ */ +@ExperimentalApi +public class Context extends AbstractDiffable implements ToXContentObject { + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField VERSION = new ParseField("version"); + private static final ParseField PARAMS = new ParseField("params"); + + public static final String LATEST_VERSION = "_latest"; + + private String name; + private String version = LATEST_VERSION; + private Map params; + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "index_template", + false, + a -> new Context((String) a[0], (String) a[1], (Map) a[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), VERSION); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), PARAMS); + } + + public Context(String name) { + this(name, LATEST_VERSION, Map.of()); + } + + public Context(String name, String version, Map params) { + this.name = name; + if (version != null) { + this.version = version; + } + this.params = params; + } + + public Context(StreamInput in) throws IOException { + this.name = in.readString(); + this.version = in.readOptionalString(); + this.params = in.readMap(); + } + + public String name() { + return name; + } + + public void name(String name) { + this.name = name; + } + + public String version() { + return version; + } + + public void version(String version) { + this.version = version; + } + + public Map params() { + return params; + } + + public void params(Map params) { + this.params = params; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(version); + out.writeMap(params); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NAME.getPreferredName(), this.name); + builder.field("version", this.version); + if (params != null) { + builder.field("params", this.params); + } + builder.endObject(); + return builder; + } + + public static Context fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Context context = (Context) o; + return Objects.equals(name, context.name) && Objects.equals(version, context.version) && Objects.equals(params, context.params); + } + + @Override + public int hashCode() { + return Objects.hash(name, version, params); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 2a54f6444ffda..440b9e267cf0a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiffable; import org.opensearch.cluster.NamedDiffableValueSerializer; +import org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -280,6 +281,8 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio private final 
 private final SortedMap<String, IndexAbstraction> indicesLookup;

+    private final Map<String, SortedMap<Long, String>> systemTemplatesLookup;
+
     Metadata(
         String clusterUUID,
         boolean clusterUUIDCommitted,
@@ -297,7 +300,8 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio
         String[] visibleOpenIndices,
         String[] allClosedIndices,
         String[] visibleClosedIndices,
-        SortedMap<String, IndexAbstraction> indicesLookup
+        SortedMap<String, IndexAbstraction> indicesLookup,
+        Map<String, SortedMap<Long, String>> systemTemplatesLookup
     ) {
         this.clusterUUID = clusterUUID;
         this.clusterUUIDCommitted = clusterUUIDCommitted;
@@ -328,6 +332,7 @@ static Custom fromXContent(XContentParser parser, String name) throws IOExceptio
         this.allClosedIndices = allClosedIndices;
         this.visibleClosedIndices = visibleClosedIndices;
         this.indicesLookup = indicesLookup;
+        this.systemTemplatesLookup = systemTemplatesLookup;
     }

     public long version() {
@@ -828,6 +833,10 @@ public Map<String, ComponentTemplate> componentTemplates() {
             .orElse(Collections.emptyMap());
     }

+    public Map<String, SortedMap<Long, String>> systemTemplatesLookup() {
+        return systemTemplatesLookup;
+    }
+
     public Map<String, ComposableIndexTemplate> templatesV2() {
         return Optional.ofNullable((ComposableIndexTemplateMetadata) this.custom(ComposableIndexTemplateMetadata.TYPE))
             .map(ComposableIndexTemplateMetadata::indexTemplates)
@@ -1189,6 +1198,8 @@ public static class Builder {
         private final Map<String, Custom> customs;
         private final Metadata previousMetadata;

+        private Map<String, SortedMap<Long, String>> systemTemplatesLookup;
+
         public Builder() {
             clusterUUID = UNKNOWN_CLUSTER_UUID;
             indices = new HashMap<>();
@@ -1554,6 +1565,8 @@ public Metadata build() {
                 ? (DataStreamMetadata) this.previousMetadata.customs.get(DataStreamMetadata.TYPE)
                 : null;

+            buildSystemTemplatesLookup();
+
             boolean recomputeRequiredforIndicesLookups = (previousMetadata == null)
                 || (indices.equals(previousMetadata.indices) == false)
                 || (previousDataStreamMetadata != null && previousDataStreamMetadata.equals(dataStreamMetadata) == false)
@@ -1564,6 +1577,33 @@
                 : buildMetadataWithRecomputedIndicesLookups();
         }

+        private void buildSystemTemplatesLookup() {
+            if (previousMetadata != null
+                && Objects.equals(
+                    previousMetadata.customs.get(ComponentTemplateMetadata.TYPE),
+                    this.customs.get(ComponentTemplateMetadata.TYPE)
+                )) {
+                systemTemplatesLookup = Collections.unmodifiableMap(previousMetadata.systemTemplatesLookup);
+            } else {
+                systemTemplatesLookup = new HashMap<>();
+                Optional.ofNullable((ComponentTemplateMetadata) this.customs.get(ComponentTemplateMetadata.TYPE))
+                    .map(ComponentTemplateMetadata::componentTemplates)
+                    .orElseGet(Collections::emptyMap)
+                    .forEach((k, v) -> {
+                        if (MetadataIndexTemplateService.isSystemTemplate(v)) {
+                            SystemTemplateMetadata templateMetadata = SystemTemplateMetadata.fromComponentTemplate(k);
+                            systemTemplatesLookup.compute(templateMetadata.name(), (ik, iv) -> {
+                                if (iv == null) {
+                                    iv = new TreeMap<>();
+                                }
+                                iv.put(templateMetadata.version(), k);
+                                return iv;
+                            });
+                        }
+                    });
+            }
+        }
+
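An illustrative aside, not part of the patch: the lookup built above keys each system-template name to a version-sorted map of component-template names, which is what lets a "_latest" context resolve through lastKey(). A minimal sketch; "logs" and the map contents are invented:

    // Hypothetical contents after two versions of a "logs" system template are registered:
    //   "logs" -> { 1L -> "name of the v1 component template", 2L -> "name of the v2 component template" }
    SortedMap<Long, String> versions = metadata.systemTemplatesLookup().get("logs");
    String latest = versions.get(versions.lastKey()); // what a "_latest" context resolves to
    String pinned = versions.get(1L);                 // what a context pinned to version "1" resolves to

     protected Metadata buildMetadataWithPreviousIndicesLookups() {
         return new Metadata(
             clusterUUID,
@@ -1582,7 +1622,8 @@ protected Metadata buildMetadataWithPreviousIndicesLookups() {
             Arrays.copyOf(previousMetadata.visibleOpenIndices, previousMetadata.visibleOpenIndices.length),
             Arrays.copyOf(previousMetadata.allClosedIndices, previousMetadata.allClosedIndices.length),
             Arrays.copyOf(previousMetadata.visibleClosedIndices, previousMetadata.visibleClosedIndices.length),
-            Collections.unmodifiableSortedMap(previousMetadata.indicesLookup)
+            Collections.unmodifiableSortedMap(previousMetadata.indicesLookup),
+            systemTemplatesLookup
         );
     }

@@ -1705,7 +1746,8 @@ protected Metadata buildMetadataWithRecomputedIndicesLookups()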
{ visibleOpenIndicesArray, allClosedIndicesArray, visibleClosedIndicesArray, - indicesLookup + indicesLookup, + systemTemplatesLookup ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 5b03d3f7b19ce..7bc3d279513cd 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -42,6 +42,9 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader; +import org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata; +import org.opensearch.cluster.applicationtemplates.SystemTemplatesService; import org.opensearch.cluster.service.ClusterManagerTaskKeys; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; @@ -53,9 +56,11 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.logging.HeaderWarning; import org.opensearch.common.regex.Regex; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; @@ -72,6 +77,7 @@ import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.UncheckedIOException; @@ -94,6 +100,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; +import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** @@ -116,6 +123,7 @@ public class MetadataIndexTemplateService { private final ClusterManagerTaskThrottler.ThrottlingKey removeIndexTemplateV2TaskKey; private final ClusterManagerTaskThrottler.ThrottlingKey createComponentTemplateTaskKey; private final ClusterManagerTaskThrottler.ThrottlingKey removeComponentTemplateTaskKey; + private final ThreadPool threadPool; @Inject public MetadataIndexTemplateService( @@ -124,7 +132,8 @@ public MetadataIndexTemplateService( AliasValidator aliasValidator, IndicesService indicesService, IndexScopedSettings indexScopedSettings, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + ThreadPool threadPool ) { this.clusterService = clusterService; this.aliasValidator = aliasValidator; @@ -132,6 +141,7 @@ public MetadataIndexTemplateService( this.metadataCreateIndexService = metadataCreateIndexService; this.indexScopedSettings = indexScopedSettings; this.xContentRegistry = xContentRegistry; + this.threadPool = threadPool; // Task is onboarded for throttling, it will get retried from associated 
TransportClusterManagerNodeAction. createIndexTemplateTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_TEMPLATE_KEY, true); @@ -209,6 +219,7 @@ public void putComponentTemplate( final ComponentTemplate template, final ActionListener listener ) { + validateComponentTemplateRequest(template); clusterService.submitStateUpdateTask( "create-component-template [" + name + "], cause [" + cause + "]", new ClusterStateUpdateTask(Priority.URGENT) { @@ -378,6 +389,7 @@ public void removeComponentTemplate( final ActionListener listener ) { validateNotInUse(clusterService.state().metadata(), name); + validateComponentTemplateRequest(clusterService.state().metadata().componentTemplates().get(name)); clusterService.submitStateUpdateTask("remove-component-template [" + name + "]", new ClusterStateUpdateTask(Priority.URGENT) { @Override @@ -439,7 +451,12 @@ static void validateNotInUse(Metadata metadata, String templateNameOrWildcard) { .collect(Collectors.toSet()); final Set componentsBeingUsed = new HashSet<>(); final List templatesStillUsing = metadata.templatesV2().entrySet().stream().filter(e -> { - Set intersecting = Sets.intersection(new HashSet<>(e.getValue().composedOf()), matchingComponentTemplates); + Set referredComponentTemplates = new HashSet<>(e.getValue().composedOf()); + String systemTemplateUsed = findContextTemplate(metadata, e.getValue().context()); + if (systemTemplateUsed != null) { + referredComponentTemplates.add(systemTemplateUsed); + } + Set intersecting = Sets.intersection(referredComponentTemplates, matchingComponentTemplates); if (intersecting.size() > 0) { componentsBeingUsed.addAll(intersecting); return true; @@ -469,7 +486,7 @@ public void putIndexTemplateV2( final ComposableIndexTemplate template, final ActionListener listener ) { - validateV2TemplateRequest(clusterService.state().metadata(), name, template); + validateV2TemplateRequest(clusterService.state().metadata(), name, template, clusterService.getClusterSettings()); clusterService.submitStateUpdateTask( "create-index-template-v2 [" + name + "], cause [" + cause + "]", new ClusterStateUpdateTask(Priority.URGENT) { @@ -502,7 +519,12 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS ); } - public static void validateV2TemplateRequest(Metadata metadata, String name, ComposableIndexTemplate template) { + public static void validateV2TemplateRequest( + Metadata metadata, + String name, + ComposableIndexTemplate template, + ClusterSettings settings + ) { if (template.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern)) { Settings mergedSettings = resolveSettings(metadata, template); if (IndexMetadata.INDEX_HIDDEN_SETTING.exists(mergedSettings)) { @@ -514,6 +536,8 @@ public static void validateV2TemplateRequest(Metadata metadata, String name, Com } final Map componentTemplates = metadata.componentTemplates(); + final boolean isContextAllowed = FeatureFlags.isEnabled(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES); + final List missingComponentTemplates = template.composedOf() .stream() .filter(componentTemplate -> componentTemplates.containsKey(componentTemplate) == false) @@ -525,6 +549,59 @@ public static void validateV2TemplateRequest(Metadata metadata, String name, Com "index template [" + name + "] specifies component templates " + missingComponentTemplates + " that do not exist" ); } + + if (template.context() != null && !isContextAllowed) { + throw new InvalidIndexTemplateException( + name, + "index template [" + + name + + "] 
specifies a context which cannot be used without enabling: "
+                    + SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED.getKey()
+            );
+        }
+
+        if (isContextAllowed
+            && template.composedOf().stream().anyMatch(componentTemplate -> isSystemTemplate(componentTemplates.get(componentTemplate)))) {
+            throw new InvalidIndexTemplateException(
+                name,
+                "index template [" + name + "] specifies a component template which can only be used in context."
+            );
+        }
+
+        if (template.context() != null && findContextTemplate(metadata, template.context()) == null) {
+            throw new InvalidIndexTemplateException(
+                name,
+                "index template [" + name + "] specifies a context which is not loaded on the cluster."
+            );
+        }
+    }
+
+    private void validateComponentTemplateRequest(ComponentTemplate componentTemplate) {
+        if (isSystemTemplate(componentTemplate)
+            && !ClusterStateSystemTemplateLoader.TEMPLATE_LOADER_IDENTIFIER.equals(
+                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME)
+            )) {
+            throw new IllegalArgumentException("A system template can only be created/updated/deleted with a repository");
+        }
+    }
+
+    private static String findContextTemplate(Metadata metadata, Context context) {
+        if (context == null) {
+            return null;
+        }
+        final boolean searchSpecificVersion = !Context.LATEST_VERSION.equals(context.version());
+        return Optional.ofNullable(metadata.systemTemplatesLookup())
+            .map(coll -> coll.get(context.name()))
+            .map(coll -> coll.get(searchSpecificVersion ? Long.parseLong(context.version()) : coll.lastKey()))
+            .orElse(null);
+    }
+
+    public static boolean isSystemTemplate(ComponentTemplate componentTemplate) {
+        return Optional.ofNullable(componentTemplate)
+            .map(ComponentTemplate::metadata)
+            .map(md -> md.get(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY))
+            .filter(ob -> SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE.equals(ob.toString()))
+            .isPresent();
+    }

     public ClusterState addIndexTemplateV2(
@@ -613,7 +690,8 @@ public ClusterState addIndexTemplateV2(
             template.priority(),
             template.version(),
             template.metadata(),
-            template.getDataStreamTemplate()
+            template.getDataStreamTemplate(),
+            template.context()
         );
     }

@@ -1140,7 +1218,7 @@ public static List<CompressedXContent> collectMappings(final ClusterState state,
             .map(Template::mappings)
             .filter(Objects::nonNull)
             .collect(Collectors.toCollection(LinkedList::new));
-        // Add the actual index template's mappings, since it takes the highest precedence
+        // Add the actual index template's mappings, since it takes the next precedence
         Optional.ofNullable(template.template()).map(Template::mappings).ifPresent(mappings::add);
         if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) {
             // add a default mapping for the timestamp field, at the lowest precedence, to make bootstrapping data streams more
@@ -1165,6 +1243,15 @@ public static List<CompressedXContent> collectMappings(final ClusterState state,
                 })
                 .ifPresent(mappings::add);
         }
+
+        // Now use context mappings which take the highest precedence
+        Optional.ofNullable(template.context())
+            .map(ctx -> findContextTemplate(state.metadata(), ctx))
+            .map(name -> state.metadata().componentTemplates().get(name))
+            .map(ComponentTemplate::template)
+            .map(Template::mappings)
+            .ifPresent(mappings::add);
+
         return Collections.unmodifiableList(mappings);
     }
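An illustrative aside, not part of the patch: taken together, the hunk above and the settings and aliases changes below establish one precedence order, component templates lowest, then the index template itself, then the template named by the context. The patch's own tests pin this down; a condensed sketch, where "my-template" and the clusterState variable are invented:

    // Suppose "my-template" sets index.codec=lz4 itself, but its context resolves to a
    // system component template that sets index.codec=best_compression.
    Settings resolved = MetadataIndexTemplateService.resolveSettings(clusterState.metadata(), "my-template");
    assert "best_compression".equals(resolved.get("index.codec")); // the context template wins

@@ -1226,8 +1313,14 @@ private static Settings resolveSettings(Metadata metadata, ComposableIndexTempla
         Settings.Builder templateSettings = Settings.builder();
         componentSettings.forEach(templateSettings::put);
-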
// Add the actual index template's settings to the end, since it takes the highest precedence. + // Add the actual index template's settings now, since it takes the next precedence. Optional.ofNullable(template.template()).map(Template::settings).ifPresent(templateSettings::put); + + // Add the template referred by context since it will take the highest precedence. + final String systemTemplate = findContextTemplate(metadata, template.context()); + final ComponentTemplate componentTemplate = metadata.componentTemplates().get(systemTemplate); + Optional.ofNullable(componentTemplate).map(ComponentTemplate::template).map(Template::settings).ifPresent(templateSettings::put); + return templateSettings.build(); } @@ -1269,8 +1362,16 @@ public static List> resolveAliases(final Metadata met .filter(Objects::nonNull) .collect(Collectors.toList()); - // Add the actual index template's aliases to the end if they exist + // Add the actual index template's aliases now if they exist Optional.ofNullable(template.template()).map(Template::aliases).ifPresent(aliases::add); + + // Now use context referenced template's aliases which take the highest precedence + if (template.context() != null) { + final String systemTemplate = findContextTemplate(metadata, template.context()); + final ComponentTemplate componentTemplate = metadata.componentTemplates().get(systemTemplate); + Optional.ofNullable(componentTemplate.template()).map(Template::aliases).ifPresent(aliases::add); + } + // Aliases are applied in order, but subsequent alias configuration from the same name is // ignored, so in order for the order to be correct, alias configuration should be in order // of precedence (with the index template first) diff --git a/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java b/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java index 4addf3802b40d..affb017264fdf 100644 --- a/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/applicationtemplates/SystemTemplatesServiceTests.java @@ -32,39 +32,51 @@ public class SystemTemplatesServiceTests extends OpenSearchTestCase { public void testSystemTemplatesLoaded() throws IOException { setupService(true); - systemTemplatesService.onClusterManager(); - SystemTemplatesService.Stats stats = systemTemplatesService.stats(); - assertNotNull(stats); - assertEquals(stats.getTemplatesLoaded(), 1L); - assertEquals(stats.getFailedLoadingTemplates(), 0L); - assertEquals(stats.getFailedLoadingRepositories(), 1L); + // First time load should happen, second time should short circuit. 
+ for (int iter = 1; iter <= 2; iter++) { + systemTemplatesService.onClusterManager(); + SystemTemplatesService.Stats stats = systemTemplatesService.stats(); + assertNotNull(stats); + assertEquals(stats.getTemplatesLoaded(), iter % 2); + assertEquals(stats.getFailedLoadingTemplates(), 0L); + assertEquals(stats.getFailedLoadingRepositories(), iter % 2); + } } - public void testSystemTemplatesVerify() throws IOException { + public void testSystemTemplatesVerifyAndLoad() throws IOException { setupService(false); systemTemplatesService.verifyRepositories(); - SystemTemplatesService.Stats stats = systemTemplatesService.stats(); assertNotNull(stats); assertEquals(stats.getTemplatesLoaded(), 0L); assertEquals(stats.getFailedLoadingTemplates(), 0L); assertEquals(stats.getFailedLoadingRepositories(), 0L); + + systemTemplatesService.onClusterManager(); + stats = systemTemplatesService.stats(); + assertNotNull(stats); + assertEquals(stats.getTemplatesLoaded(), 1L); + assertEquals(stats.getFailedLoadingTemplates(), 0L); + assertEquals(stats.getFailedLoadingRepositories(), 0L); } public void testSystemTemplatesVerifyWithFailingRepository() throws IOException { setupService(true); - assertThrows(IllegalStateException.class, () -> systemTemplatesService.verifyRepositories()); + // Do it multiple times to ensure verify checks are always executed. + for (int i = 0; i < 2; i++) { + assertThrows(IllegalStateException.class, () -> systemTemplatesService.verifyRepositories()); - SystemTemplatesService.Stats stats = systemTemplatesService.stats(); - assertNotNull(stats); - assertEquals(stats.getTemplatesLoaded(), 0L); - assertEquals(stats.getFailedLoadingTemplates(), 0L); - assertEquals(stats.getFailedLoadingRepositories(), 1L); + SystemTemplatesService.Stats stats = systemTemplatesService.stats(); + assertNotNull(stats); + assertEquals(stats.getTemplatesLoaded(), 0L); + assertEquals(stats.getFailedLoadingTemplates(), 0L); + assertEquals(stats.getFailedLoadingRepositories(), 1L); + } } - void setupService(boolean errorFromMockPlugin) throws IOException { + private void setupService(boolean errorFromMockPlugin) throws IOException { FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); ThreadPool mockPool = Mockito.mock(ThreadPool.class); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 6643d6e13289b..f26f45b69d133 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -36,6 +36,8 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader; +import org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata; import org.opensearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; @@ -45,6 +47,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import 
org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; @@ -53,6 +57,8 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.Environment; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.DefaultRemoteStoreSettings; @@ -62,6 +68,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; @@ -76,10 +83,14 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.singletonList; +import static org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader.TEMPLATE_LOADER_IDENTIFIER; +import static org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata.fromComponentTemplateInfo; import static org.opensearch.common.settings.Settings.builder; +import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.env.Environment.PATH_HOME_SETTING; import static org.opensearch.index.mapper.DataStreamFieldMapper.Defaults.TIMESTAMP_FIELD; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; @@ -656,6 +667,306 @@ public void onFailure(Exception e) { ); } + public void testPutGlobalV2TemplateWhichProvidesContextWithContextDisabled() throws Exception { + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( + List.of("*"), + null, + List.of(), + null, + null, + null, + null, + new Context("any") + ); + InvalidIndexTemplateException ex = expectThrows( + InvalidIndexTemplateException.class, + () -> metadataIndexTemplateService.putIndexTemplateV2( + "testing", + true, + "template-referencing-context-as-ct", + TimeValue.timeValueSeconds(30L), + globalIndexTemplate, + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("the listener should not be invoked as validation should fail"); + } + + @Override + public void onFailure(Exception e) { + fail("the listener should not be invoked as validation should fail"); + } + } + ) + ); + assertTrue( + "Invalid exception message." 
+ ex.getMessage(), + ex.getMessage().contains("specifies a context which cannot be used without enabling") + ); + } + + public void testPutGlobalV2TemplateWhichProvidesContextNotPresentInState() throws Exception { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( + List.of("*"), + null, + List.of(), + null, + null, + null, + null, + new Context("any") + ); + InvalidIndexTemplateException ex = expectThrows( + InvalidIndexTemplateException.class, + () -> metadataIndexTemplateService.putIndexTemplateV2( + "testing", + true, + "template-referencing-context-as-ct", + TimeValue.timeValueSeconds(30L), + globalIndexTemplate, + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("the listener should not be invoked as validation should fail"); + } + + @Override + public void onFailure(Exception e) { + fail("the listener should not be invoked as validation should fail"); + } + } + ) + ); + assertTrue( + "Invalid exception message." + ex.getMessage(), + ex.getMessage().contains("specifies a context which is not loaded on the cluster") + ); + } + + public void testPutGlobalV2TemplateWhichProvidesContextWithNonExistingVersion() throws Exception { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + + Function templateApplier = codec -> new Template( + Settings.builder().put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codec).build(), + null, + null + ); + ComponentTemplate systemTemplate = new ComponentTemplate( + templateApplier.apply(CodecService.BEST_COMPRESSION_CODEC), + 1L, + Map.of(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY, SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE, "_version", 1L) + ); + SystemTemplateMetadata systemTemplateMetadata = fromComponentTemplateInfo("ct-best-compression-codec" + System.nanoTime(), 1); + + CountDownLatch waitToCreateComponentTemplate = new CountDownLatch(1); + ActionListener createComponentTemplateListener = new ActionListener() { + + @Override + public void onResponse(AcknowledgedResponse response) { + waitToCreateComponentTemplate.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("expecting the component template PUT to succeed but got: " + e.getMessage()); + } + }; + + ThreadContext threadContext = getInstanceFromNode(ThreadPool.class).getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + getInstanceFromNode(ThreadPool.class).getThreadContext().putTransient(ACTION_ORIGIN_TRANSIENT_NAME, TEMPLATE_LOADER_IDENTIFIER); + metadataIndexTemplateService.putComponentTemplate( + "test", + true, + systemTemplateMetadata.fullyQualifiedName(), + TimeValue.timeValueSeconds(30L), + systemTemplate, + createComponentTemplateListener + ); + } + + assertTrue("Could not create component templates", waitToCreateComponentTemplate.await(10, TimeUnit.SECONDS)); + + Context context = new Context(systemTemplateMetadata.name(), Long.toString(2L), Map.of()); + ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( + List.of("*"), + templateApplier.apply(CodecService.LZ4), + List.of(), + null, + null, + null, + null, + 
context + ); + + InvalidIndexTemplateException ex = expectThrows( + InvalidIndexTemplateException.class, + () -> metadataIndexTemplateService.putIndexTemplateV2( + "testing", + true, + "template-referencing-context-as-ct", + TimeValue.timeValueSeconds(30L), + globalIndexTemplate, + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("the listener should not be invoked as validation should fail"); + } + + @Override + public void onFailure(Exception e) { + fail("the listener should not be invoked as validation should fail"); + } + } + ) + ); + assertTrue( + "Invalid exception message." + ex.getMessage(), + ex.getMessage().contains("specifies a context which is not loaded on the cluster") + ); + } + + public void testPutGlobalV2TemplateWhichProvidesContextInComposedOfSection() throws Exception { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + + Function templateApplier = codec -> new Template( + Settings.builder().put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codec).build(), + null, + null + ); + ComponentTemplate systemTemplate = new ComponentTemplate( + templateApplier.apply(CodecService.BEST_COMPRESSION_CODEC), + 1L, + Map.of(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY, SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE, "_version", 1L) + ); + SystemTemplateMetadata systemTemplateMetadata = fromComponentTemplateInfo("context-best-compression-codec" + System.nanoTime(), 1); + + CountDownLatch waitToCreateComponentTemplate = new CountDownLatch(1); + ActionListener createComponentTemplateListener = new ActionListener() { + + @Override + public void onResponse(AcknowledgedResponse response) { + waitToCreateComponentTemplate.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("expecting the component template PUT to succeed but got: " + e.getMessage()); + } + }; + ThreadContext context = getInstanceFromNode(ThreadPool.class).getThreadContext(); + try (ThreadContext.StoredContext ignore = context.stashContext()) { + getInstanceFromNode(ThreadPool.class).getThreadContext().putTransient(ACTION_ORIGIN_TRANSIENT_NAME, TEMPLATE_LOADER_IDENTIFIER); + metadataIndexTemplateService.putComponentTemplate( + "test", + true, + systemTemplateMetadata.fullyQualifiedName(), + TimeValue.timeValueSeconds(30L), + systemTemplate, + createComponentTemplateListener + ); + } + assertTrue("Could not create component templates", waitToCreateComponentTemplate.await(10, TimeUnit.SECONDS)); + + ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( + List.of("*"), + templateApplier.apply(CodecService.LZ4), + List.of(systemTemplateMetadata.fullyQualifiedName()), + null, + null, + null, + null + ); + InvalidIndexTemplateException ex = expectThrows( + InvalidIndexTemplateException.class, + () -> metadataIndexTemplateService.putIndexTemplateV2( + "testing", + true, + "template-referencing-context-as-ct", + TimeValue.timeValueSeconds(30L), + globalIndexTemplate, + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + fail("the listener should not be invoked as validation should fail"); + } + + @Override + public void onFailure(Exception e) { + fail("the listener should not be invoked as validation should fail"); + } + } + ) + ); + assertTrue( + "Invalid exception message." 
+ ex.getMessage(),
+            ex.getMessage().contains("specifies a component template which can only be used in context")
+        );
+    }
+
+    public void testPutGlobalV2TemplateWhichProvidesContextWithSpecificVersion() throws Exception {
+        verifyTemplateCreationUsingContext("1");
+    }
+
+    public void testPutGlobalV2TemplateWhichProvidesContextWithLatestVersion() throws Exception {
+        verifyTemplateCreationUsingContext("_latest");
+    }
+
+    public void testModifySystemTemplateViaUnknownSource() throws Exception {
+        FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build());
+        MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService();
+
+        Function<String, Template> templateApplier = codec -> new Template(
+            Settings.builder().put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codec).build(),
+            null,
+            null
+        );
+
+        ComponentTemplate systemTemplate = new ComponentTemplate(
+            templateApplier.apply(CodecService.BEST_COMPRESSION_CODEC),
+            1L,
+            Map.of(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY, SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE, "_version", 1L)
+        );
+        SystemTemplateMetadata systemTemplateMetadata = fromComponentTemplateInfo("ct-best-compression-codec" + System.nanoTime(), 1);
+
+        IllegalArgumentException ex = expectThrows(
+            IllegalArgumentException.class,
+            () -> metadataIndexTemplateService.putComponentTemplate(
+                "test",
+                true,
+                systemTemplateMetadata.fullyQualifiedName(),
+                TimeValue.timeValueSeconds(30L),
+                systemTemplate,
+                ActionListener.wrap(() -> {})
+            )
+        );
+        assertTrue(
+            "Invalid exception message." + ex.getMessage(),
+            ex.getMessage().contains("A system template can only be created/updated/deleted with a repository")
+        );
+    }
+
+    public void testResolveSettingsWithContextVersion() throws Exception {
+        ClusterService clusterService = node().injector().getInstance(ClusterService.class);
+        final String indexTemplateName = verifyTemplateCreationUsingContext("1");
+
+        Settings settings = MetadataIndexTemplateService.resolveSettings(clusterService.state().metadata(), indexTemplateName);
+        assertThat(settings.get("index.codec"), equalTo(CodecService.BEST_COMPRESSION_CODEC));
+    }
+
+    public void testResolveSettingsWithContextLatest() throws Exception {
+        ClusterService clusterService = node().injector().getInstance(ClusterService.class);
+        final String indexTemplateName = verifyTemplateCreationUsingContext(Context.LATEST_VERSION);
+
+        Settings settings = MetadataIndexTemplateService.resolveSettings(clusterService.state().metadata(), indexTemplateName);
+        assertThat(settings.get("index.codec"), equalTo(CodecService.ZLIB));
+    }
+
     /**
      * Test that if we have a pre-existing v2 template and put a "*" v1 template, we generate a warning
      */
@@ -1513,6 +1824,16 @@ public void testResolveAliases() throws Exception {
         ComponentTemplate ct2 = new ComponentTemplate(new Template(null, null, a2), null, null);
         state = service.addComponentTemplate(state, true, "ct_high", ct1);
         state = service.addComponentTemplate(state, true, "ct_low", ct2);
+
+        Map<String, AliasMetadata> a4 = Map.of("sys", AliasMetadata.builder("sys").build());
+        ComponentTemplate sysTemplate = new ComponentTemplate(
+            new Template(null, null, a4),
+            1L,
+            Map.of(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY, SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE, "_version", 1)
+        );
+        SystemTemplateMetadata systemTemplateMetadata = SystemTemplateMetadata.fromComponentTemplateInfo("sys-template", 1L);
+        state = service.addComponentTemplate(state, true,
systemTemplateMetadata.fullyQualifiedName(), sysTemplate); + ComposableIndexTemplate it = new ComposableIndexTemplate( Collections.singletonList("i*"), new Template(null, null, a3), @@ -1520,14 +1841,15 @@ public void testResolveAliases() throws Exception { 0L, 1L, null, - null + null, + new Context(systemTemplateMetadata.name()) ); state = service.addIndexTemplateV2(state, true, "my-template", it); List> resolvedAliases = MetadataIndexTemplateService.resolveAliases(state.metadata(), "my-template"); - // These should be order of precedence, so the index template (a3), then ct_high (a1), then ct_low (a2) - assertThat(resolvedAliases, equalTo(Arrays.asList(a3, a1, a2))); + // These should be order of precedence, so the context(a4), index template (a3), then ct_high (a1), then ct_low (a2) + assertThat(resolvedAliases, equalTo(Arrays.asList(a4, a3, a1, a2))); } public void testAddInvalidTemplate() throws Exception { @@ -2067,7 +2389,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr new AliasValidator(), null, new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS), - xContentRegistry + xContentRegistry, + null ); final List throwables = new ArrayList<>(); @@ -2190,4 +2513,132 @@ public static void assertTemplatesEqual(ComposableIndexTemplate actual, Composab } } } + + private String verifyTemplateCreationUsingContext(String contextVersion) throws Exception { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, true).build()); + MetadataIndexTemplateService metadataIndexTemplateService = getMetadataIndexTemplateService(); + + Function templateApplier = codec -> new Template( + Settings.builder().put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codec).build(), + null, + null + ); + + ComponentTemplate componentTemplate = new ComponentTemplate(templateApplier.apply(CodecService.DEFAULT_CODEC), 1L, new HashMap<>()); + + ComponentTemplate systemTemplate = new ComponentTemplate( + templateApplier.apply(CodecService.BEST_COMPRESSION_CODEC), + 1L, + Map.of(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY, SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE, "_version", 1L) + ); + SystemTemplateMetadata systemTemplateMetadata = fromComponentTemplateInfo("ct-best-compression-codec" + System.nanoTime(), 1); + + ComponentTemplate systemTemplateV2 = new ComponentTemplate( + templateApplier.apply(CodecService.ZLIB), + 2L, + Map.of(ClusterStateSystemTemplateLoader.TEMPLATE_TYPE_KEY, SystemTemplateMetadata.COMPONENT_TEMPLATE_TYPE, "_version", 2L) + ); + SystemTemplateMetadata systemTemplateV2Metadata = fromComponentTemplateInfo(systemTemplateMetadata.name(), 2); + + CountDownLatch waitToCreateComponentTemplate = new CountDownLatch(3); + ActionListener createComponentTemplateListener = new ActionListener() { + + @Override + public void onResponse(AcknowledgedResponse response) { + waitToCreateComponentTemplate.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("expecting the component template PUT to succeed but got: " + e.getMessage()); + } + }; + + String componentTemplateName = "ct-default-codec" + System.nanoTime(); + metadataIndexTemplateService.putComponentTemplate( + "test", + true, + componentTemplateName, + TimeValue.timeValueSeconds(30L), + componentTemplate, + createComponentTemplateListener + ); + + ThreadContext threadContext = getInstanceFromNode(ThreadPool.class).getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + 
getInstanceFromNode(ThreadPool.class).getThreadContext().putTransient(ACTION_ORIGIN_TRANSIENT_NAME, TEMPLATE_LOADER_IDENTIFIER); + metadataIndexTemplateService.putComponentTemplate( + "test", + true, + systemTemplateMetadata.fullyQualifiedName(), + TimeValue.timeValueSeconds(30L), + systemTemplate, + createComponentTemplateListener + ); + + metadataIndexTemplateService.putComponentTemplate( + "test", + true, + systemTemplateV2Metadata.fullyQualifiedName(), + TimeValue.timeValueSeconds(30L), + systemTemplateV2, + createComponentTemplateListener + ); + } + + assertTrue("Could not create component templates", waitToCreateComponentTemplate.await(10, TimeUnit.SECONDS)); + + Context context = new Context(systemTemplateMetadata.name(), contextVersion, Map.of()); + ComposableIndexTemplate globalIndexTemplate = new ComposableIndexTemplate( + List.of("*"), + templateApplier.apply(CodecService.LZ4), + List.of(componentTemplateName), + null, + null, + null, + null, + context + ); + + String indexTemplateName = "template-referencing-ct-and-context"; + CountDownLatch waitForIndexTemplate = new CountDownLatch(1); + metadataIndexTemplateService.putIndexTemplateV2( + "testing", + true, + indexTemplateName, + TimeValue.timeValueSeconds(30L), + globalIndexTemplate, + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + waitForIndexTemplate.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("the listener should not be invoked as the template should succeed"); + } + } + ); + assertTrue("Expected index template to have been created.", waitForIndexTemplate.await(10, TimeUnit.SECONDS)); + assertTemplatesEqual( + node().injector().getInstance(ClusterService.class).state().metadata().templatesV2().get(indexTemplateName), + globalIndexTemplate + ); + + return indexTemplateName; + } + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder() + .put(super.featureFlagSettings()) + .put(FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES, false) + .build(); + } } From 712ebfdac5c1a22acb0f6aff55170ce8336a718d Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Wed, 24 Jul 2024 20:28:15 +0530 Subject: [PATCH 16/68] Add changelog for remote state multi part upload fix (#14958) Signed-off-by: Sooraj Sinha --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00560d68e4051..0d6312a76e0d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,6 +95,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix NPE in ReplicaShardAllocator ([#14385](https://github.com/opensearch-project/OpenSearch/pull/14385)) - Fix constant_keyword field type used when creating index ([#14807](https://github.com/opensearch-project/OpenSearch/pull/14807)) - Use circuit breaker in InternalHistogram when adding empty buckets ([#14754](https://github.com/opensearch-project/OpenSearch/pull/14754)) +- Create new IndexInput for multi part upload ([#14888](https://github.com/opensearch-project/OpenSearch/pull/14888)) - Fix searchable snapshot failure with scripted fields ([#14411](https://github.com/opensearch-project/OpenSearch/pull/14411)) ### Security From 76be6155f5e637252dc1b60b51462a6787736b51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Jul 2024 11:10:43 -0400 Subject: [PATCH 17/68] Bump 
org.apache.commons:commons-lang3 from 3.14.0 to 3.15.0 in /plugins/repository-hdfs (#14861) * Bump org.apache.commons:commons-lang3 in /plugins/repository-hdfs Bumps org.apache.commons:commons-lang3 from 3.14.0 to 3.15.0. --- updated-dependencies: - dependency-name: org.apache.commons:commons-lang3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d6312a76e0d0..3dc2f38b5f998 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `jackson` from 2.17.1 to 2.17.2 ([#14687](https://github.com/opensearch-project/OpenSearch/pull/14687)) - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#14748](https://github.com/opensearch-project/OpenSearch/pull/14748)) - Bump `actions/checkout` from 2 to 4 ([#14858](https://github.com/opensearch-project/OpenSearch/pull/14858)) +- Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) ### Changed - [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 63eb783649884..884fb1333404a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -76,7 +76,7 @@ dependencies { api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" - api 'org.apache.commons:commons-lang3:3.14.0' + api 'org.apache.commons:commons-lang3:3.15.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 deleted file mode 100644 index d783e07e40902..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ed471194b02f2c6cb734a0cd6f6f107c673afae \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 new file mode 100644 index 0000000000000..4b1179c935946 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.15.0.jar.sha1 @@ -0,0 +1 @@ +21581109b4be710ea4b195d5760392ec284f9f11 \ No newline at end of file From fcc231dfc349e092c3f68e49f49e32a062313f71 Mon Sep 17 00:00:00 2001 From: zhichao-aws Date: Wed, 24 Jul 2024 23:13:02 +0800 Subject: [PATCH 18/68] [BUG FIX] Fix the visit of inner query for NestedQueryBuilder (#14739) * fix nested query 
visit subquery

Signed-off-by: zhichao-aws

* add change log

Signed-off-by: zhichao-aws

---------

Signed-off-by: zhichao-aws
Signed-off-by: Daniel (dB.) Doubrovkine
Co-authored-by: Daniel (dB.) Doubrovkine
---
 CHANGELOG.md | 1 +
 .../opensearch/index/query/NestedQueryBuilder.java | 9 +++++++++
 .../index/query/NestedQueryBuilderTests.java | 11 +++++++++++
 3 files changed, 21 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3dc2f38b5f998..fb1d060be684b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -98,6 +98,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Use circuit breaker in InternalHistogram when adding empty buckets ([#14754](https://github.com/opensearch-project/OpenSearch/pull/14754))
 - Create new IndexInput for multi part upload ([#14888](https://github.com/opensearch-project/OpenSearch/pull/14888))
 - Fix searchable snapshot failure with scripted fields ([#14411](https://github.com/opensearch-project/OpenSearch/pull/14411))
+- Fix the visit of inner query for NestedQueryBuilder ([#14739](https://github.com/opensearch-project/OpenSearch/pull/14739))

 ### Security

diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java
index b5ba79632b622..5908882472ce7 100644
--- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java
+++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.MultiCollector;
 import org.apache.lucene.search.Query;
@@ -505,4 +506,12 @@ public TopDocsAndMaxScore topDocs(SearchHit hit) throws IOException {
             }
         }
     }
+
+    @Override
+    public void visit(QueryBuilderVisitor visitor) {
+        visitor.accept(this);
+        if (query != null) {
+            visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(query);
+        }
+    }
 }

diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java
index f72bd76913c8f..351011eb1b812 100644
--- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java
@@ -59,8 +59,10 @@ import org.hamcrest.Matchers;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;

@@ -565,4 +567,13 @@ void doWithDepth(int depth, ThrowingConsumer<QueryShardContext> test) throws Exc
         );
     }
+
+    public void testVisit() {
+        NestedQueryBuilder builder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None);
+
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        builder.visit(createTestVisitor(visitedQueries));
+
+        assertEquals(2, visitedQueries.size());
+    }
 }
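An illustrative aside, not part of the patch: with the visit override above, a visitor now reaches the inner query as well as the nested wrapper. A minimal collecting visitor in the spirit of the test's createTestVisitor helper, assuming QueryBuilderVisitor has only the two methods used in the diff:

    List<QueryBuilder> seen = new ArrayList<>();
    QueryBuilderVisitor collector = new QueryBuilderVisitor() {
        @Override
        public void accept(QueryBuilder qb) {
            seen.add(qb); // record every builder the traversal reaches
        }

        @Override
        public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
            return this; // reuse the same collector for child queries
        }
    };
    new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None).visit(collector);
    assert seen.size() == 2; // the nested builder itself plus its inner match_all

From 157d27700157f3e24ef8b150b542e78d788bddab Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Thu, 25 Jul 2024 11:15:21 -0500
Subject: [PATCH 19/68] Forward port 2.16 release notes (#14975)

Signed-off-by: Andrew Ross
---
 CHANGELOG.md | 83 -----------------
 .../opensearch.release-notes-2.16.0.md | 92 +++++++++++++++++++
 2 files changed, 92 insertions(+), 83 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index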
fb1d060be684b..e88a084f7d7f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,100 +5,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add fingerprint ingest processor ([#13724](https://github.com/opensearch-project/OpenSearch/pull/13724)) -- [Remote Store] Rate limiter for remote store low priority uploads ([#14374](https://github.com/opensearch-project/OpenSearch/pull/14374/)) -- Apply the date histogram rewrite optimization to range aggregation ([#13865](https://github.com/opensearch-project/OpenSearch/pull/13865)) -- [Writable Warm] Add composite directory implementation and integrate it with FileCache ([12782](https://github.com/opensearch-project/OpenSearch/pull/12782)) -- [Workload Management] Add QueryGroup schema ([13669](https://github.com/opensearch-project/OpenSearch/pull/13669)) -- Add batching supported processor base type AbstractBatchingProcessor ([#14554](https://github.com/opensearch-project/OpenSearch/pull/14554)) -- Fix race condition while parsing derived fields from search definition ([14445](https://github.com/opensearch-project/OpenSearch/pull/14445)) -- Add `strict_allow_templates` dynamic mapping option ([#14555](https://github.com/opensearch-project/OpenSearch/pull/14555)) -- Add allowlist setting for ingest-common and search-pipeline-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439)) -- [Workload Management] add queryGroupId header propagator across requests and nodes ([#14614](https://github.com/opensearch-project/OpenSearch/pull/14614)) -- Create SystemIndexRegistry with helper method matchesSystemIndex ([#14415](https://github.com/opensearch-project/OpenSearch/pull/14415)) -- Print reason why parent task was cancelled ([#14604](https://github.com/opensearch-project/OpenSearch/issues/14604)) -- Add matchesPluginSystemIndexPattern to SystemIndexRegistry ([#14750](https://github.com/opensearch-project/OpenSearch/pull/14750)) -- Add Plugin interface for loading application based configuration templates (([#14659](https://github.com/opensearch-project/OpenSearch/issues/14659))) -- Refactor remote-routing-table service inline with remote state interfaces([#14668](https://github.com/opensearch-project/OpenSearch/pull/14668)) -- Add shard-diff path to diff manifest to reduce number of read calls remote store (([#14684](https://github.com/opensearch-project/OpenSearch/pull/14684))) -- Add SortResponseProcessor to Search Pipelines (([#14785](https://github.com/opensearch-project/OpenSearch/issues/14785))) -- Add prefix mode verification setting for repository verification (([#14790](https://github.com/opensearch-project/OpenSearch/pull/14790))) -- Add SplitResponseProcessor to Search Pipelines (([#14800](https://github.com/opensearch-project/OpenSearch/issues/14800))) -- Optimize TransportNodesAction to not send DiscoveryNodes for NodeStats, NodesInfo and ClusterStats call ([14749](https://github.com/opensearch-project/OpenSearch/pull/14749)) -- Reduce logging in DEBUG for MasterService:run ([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795)) -- Enabling term version check on local state for all ClusterManager Read Transport Actions ([#14273](https://github.com/opensearch-project/OpenSearch/pull/14273)) -- Add persian_stem filter (([#14847](https://github.com/opensearch-project/OpenSearch/pull/14847))) -- Create listener to refresh search thread resource usage ([#14832](https://github.com/opensearch-project/OpenSearch/pull/14832)) -- Add 
rest, transport layer changes for hot to warm tiering - dedicated setup (([#13980](https://github.com/opensearch-project/OpenSearch/pull/13980)) -- Optimize Cluster Stats Indices to precomute node level stats ([#14426](https://github.com/opensearch-project/OpenSearch/pull/14426)) -- Add logic to create index templates (v2) using context field ([#14811](https://github.com/opensearch-project/OpenSearch/pull/14811)) ### Dependencies -- Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442)) -- Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042)) -- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356)) -- Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361)) -- Bump `reactor` from 3.5.17 to 3.5.19 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697)) -- Bump `reactor-netty` from 1.1.19 to 1.1.21 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697)) -- Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396)) -- Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398)) -- Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399)) -- Bump `com.gradle.develocity` from 3.17.4 to 3.17.6 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397), [#14856](https://github.com/opensearch-project/OpenSearch/pull/14856)) -- Bump `opentelemetry` from 1.36.0 to 1.40.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457), [#14674](https://github.com/opensearch-project/OpenSearch/pull/14674)) -- Bump `opentelemetry-semconv` from 1.25.0-alpha to 1.26.0-alpha ([#14674](https://github.com/opensearch-project/OpenSearch/pull/14674)) -- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14673)) -- Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517)) -- Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.1 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610), [#14857](https://github.com/opensearch-project/OpenSearch/pull/14857)) -- Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672)) -- Bump `net.minidev:accessors-smart` from 2.5.0 to 2.5.1 ([#14673](https://github.com/opensearch-project/OpenSearch/pull/14673)) -- Bump `jackson` from 2.17.1 to 2.17.2 ([#14687](https://github.com/opensearch-project/OpenSearch/pull/14687)) -- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#14748](https://github.com/opensearch-project/OpenSearch/pull/14748)) -- Bump `actions/checkout` from 2 to 4 ([#14858](https://github.com/opensearch-project/OpenSearch/pull/14858)) - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) ### Changed -- 
[Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187)) -- unsignedLongRangeQuery now returns MatchNoDocsQuery if the lower bounds are greater than the upper bounds ([#14416](https://github.com/opensearch-project/OpenSearch/pull/14416)) -- Updated the `indices.query.bool.max_clause_count` setting from being static to dynamically updateable ([#13568](https://github.com/opensearch-project/OpenSearch/pull/13568)) -- Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448)) -- Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575)) -- Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597)) -- Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635)) -- Make reroute iteration time-bound for large shard allocations ([#14848](https://github.com/opensearch-project/OpenSearch/pull/14848)) ### Deprecated -- Deprecate batch_size parameter on bulk API ([#14725](https://github.com/opensearch-project/OpenSearch/pull/14725)) ### Removed -- Remove query categorization changes ([#14759](https://github.com/opensearch-project/OpenSearch/pull/14759)) ### Fixed -- Fix allowUnmappedFields, mapUnmappedFieldAsString settings are not applied when parsing certain types of query string query ([#13957](https://github.com/opensearch-project/OpenSearch/pull/13957)) -- Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474)) -- Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379)) -- Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086)) -- Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes ([#10959](https://github.com/opensearch-project/OpenSearch/pull/10959)) -- Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155)) -- Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465)) -- Write shard level metadata blob when snapshotting searchable snapshot indexes ([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190)) -- Fix aggs result of NestedAggregator with sub NestedAggregator ([#13324](https://github.com/opensearch-project/OpenSearch/pull/13324)) -- Fix fs info reporting negative available size ([#11573](https://github.com/opensearch-project/OpenSearch/pull/11573)) -- Add ListPitInfo::getKeepAlive() getter ([#14495](https://github.com/opensearch-project/OpenSearch/pull/14495)) -- Fix FuzzyQuery in keyword field will use IndexOrDocValuesQuery when both of index and doc_value are true ([#14378](https://github.com/opensearch-project/OpenSearch/pull/14378)) -- Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004)) -- Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552)) -- Fix create or update alias API doesn't throw exception for unsupported parameters 
-- Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200))
-- Refactoring Grok.validatePatternBank by using an iterative approach ([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206))
-- Fix NPE when creating index with index.number_of_replicas set to null ([#14812](https://github.com/opensearch-project/OpenSearch/pull/14812))
-- Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722))
-- Fix bulk upsert ignores the default_pipeline and final_pipeline when auto-created index matches the index template ([#12891](https://github.com/opensearch-project/OpenSearch/pull/12891))
-- Fix NPE in ReplicaShardAllocator ([#14385](https://github.com/opensearch-project/OpenSearch/pull/14385))
-- Fix constant_keyword field type used when creating index ([#14807](https://github.com/opensearch-project/OpenSearch/pull/14807))
-- Use circuit breaker in InternalHistogram when adding empty buckets ([#14754](https://github.com/opensearch-project/OpenSearch/pull/14754))
-- Create new IndexInput for multi part upload ([#14888](https://github.com/opensearch-project/OpenSearch/pull/14888))
-- Fix searchable snapshot failure with scripted fields ([#14411](https://github.com/opensearch-project/OpenSearch/pull/14411))
-- Fix the visit of inner query for NestedQueryBuilder ([#14739](https://github.com/opensearch-project/OpenSearch/pull/14739))
 ### Security

diff --git a/release-notes/opensearch.release-notes-2.16.0.md b/release-notes/opensearch.release-notes-2.16.0.md
new file mode 100644
index 0000000000000..193aa6b53714c
--- /dev/null
+++ b/release-notes/opensearch.release-notes-2.16.0.md
@@ -0,0 +1,92 @@
+## 2024-07-24 Version 2.16.0 Release Notes
+
+## [2.16.0]
+### Added
+- Add fingerprint ingest processor ([#13724](https://github.com/opensearch-project/OpenSearch/pull/13724))
+- [Remote Store] Rate limiter for remote store low priority uploads ([#14374](https://github.com/opensearch-project/OpenSearch/pull/14374/))
+- Apply the date histogram rewrite optimization to range aggregation ([#13865](https://github.com/opensearch-project/OpenSearch/pull/13865))
+- [Writable Warm] Add composite directory implementation and integrate it with FileCache ([#12782](https://github.com/opensearch-project/OpenSearch/pull/12782))
+- [Workload Management] Add QueryGroup schema ([#13669](https://github.com/opensearch-project/OpenSearch/pull/13669))
+- Add batching supported processor base type AbstractBatchingProcessor ([#14554](https://github.com/opensearch-project/OpenSearch/pull/14554))
+- Fix race condition while parsing derived fields from search definition ([#14445](https://github.com/opensearch-project/OpenSearch/pull/14445))
+- Add `strict_allow_templates` dynamic mapping option ([#14555](https://github.com/opensearch-project/OpenSearch/pull/14555))
+- Add allowlist setting for ingest-common and search-pipeline-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439))
+- [Workload Management] add queryGroupId header propagator across requests and nodes ([#14614](https://github.com/opensearch-project/OpenSearch/pull/14614))
+- Create SystemIndexRegistry with helper method matchesSystemIndex ([#14415](https://github.com/opensearch-project/OpenSearch/pull/14415))
+- Print reason why parent task was cancelled ([#14604](https://github.com/opensearch-project/OpenSearch/issues/14604))
+- Add matchesPluginSystemIndexPattern to SystemIndexRegistry ([#14750](https://github.com/opensearch-project/OpenSearch/pull/14750))
+- Add Plugin interface for loading application based configuration templates ([#14659](https://github.com/opensearch-project/OpenSearch/issues/14659))
+- Refactor remote-routing-table service inline with remote state interfaces ([#14668](https://github.com/opensearch-project/OpenSearch/pull/14668))
+- Add shard-diff path to diff manifest to reduce number of read calls remote store ([#14684](https://github.com/opensearch-project/OpenSearch/pull/14684))
+- Add SortResponseProcessor to Search Pipelines ([#14785](https://github.com/opensearch-project/OpenSearch/issues/14785))
+- Add prefix mode verification setting for repository verification ([#14790](https://github.com/opensearch-project/OpenSearch/pull/14790))
+- Add SplitResponseProcessor to Search Pipelines ([#14800](https://github.com/opensearch-project/OpenSearch/issues/14800))
+- Optimize TransportNodesAction to not send DiscoveryNodes for NodeStats, NodesInfo and ClusterStats call ([#14749](https://github.com/opensearch-project/OpenSearch/pull/14749))
+- Reduce logging in DEBUG for MasterService:run ([#14795](https://github.com/opensearch-project/OpenSearch/pull/14795))
+- Add rest, transport layer changes for hot to warm tiering - dedicated setup ([#13980](https://github.com/opensearch-project/OpenSearch/pull/13980))
+- Enabling term version check on local state for all ClusterManager Read Transport Actions ([#14273](https://github.com/opensearch-project/OpenSearch/pull/14273))
+- Optimize Cluster Stats Indices to precompute node level stats ([#14426](https://github.com/opensearch-project/OpenSearch/pull/14426))
+- Create listener to refresh search thread resource usage ([#14832](https://github.com/opensearch-project/OpenSearch/pull/14832))
+- Add logic to create index templates (v2) using context field ([#14811](https://github.com/opensearch-project/OpenSearch/pull/14811))
+
+### Dependencies
+- Update to Apache Lucene 9.11.1 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042), [#14576](https://github.com/opensearch-project/OpenSearch/pull/14576))
+- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356))
+- Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361))
+- Bump `reactor` from 3.5.17 to 3.5.19 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697))
+- Bump `reactor-netty` from 1.1.19 to 1.1.21 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697))
+- Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396))
+- Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398))
+- Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399))
+- Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397))
+- Bump `opentelemetry` from 1.36.0 to 1.40.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457), [#14674](https://github.com/opensearch-project/OpenSearch/pull/14674))
+- Bump `opentelemetry-semconv` from 1.25.0-alpha to 1.26.0-alpha ([#14674](https://github.com/opensearch-project/OpenSearch/pull/14674))
+- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14673))
+- Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517))
+- Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610))
+- Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672))
+- Bump `net.minidev:accessors-smart` from 2.5.0 to 2.5.1 ([#14673](https://github.com/opensearch-project/OpenSearch/pull/14673))
+- Bump `jackson` from 2.17.1 to 2.17.2 ([#14687](https://github.com/opensearch-project/OpenSearch/pull/14687))
+- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#14748](https://github.com/opensearch-project/OpenSearch/pull/14748))
+
+### Changed
+- [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187))
+- unsignedLongRangeQuery now returns MatchNoDocsQuery if the lower bounds are greater than the upper bounds ([#14416](https://github.com/opensearch-project/OpenSearch/pull/14416))
+- Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448))
+- Updated the `indices.query.bool.max_clause_count` setting from being static to dynamically updateable ([#13568](https://github.com/opensearch-project/OpenSearch/pull/13568))
+- Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575))
+- Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597))
+- Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635))
+- Make reroute iteration time-bound for large shard allocations ([#14848](https://github.com/opensearch-project/OpenSearch/pull/14848))
+
+### Deprecated
+- Deprecate batch_size parameter on bulk API ([#14725](https://github.com/opensearch-project/OpenSearch/pull/14725))
+
+### Removed
+- Remove query categorization changes ([#14759](https://github.com/opensearch-project/OpenSearch/pull/14759))
+
+### Fixed
+- Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474))
+- Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379))
+- Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086))
+- Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes ([#10959](https://github.com/opensearch-project/OpenSearch/pull/10959))
+- Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155))
+- Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465))
+- Write shard level metadata blob when snapshotting searchable snapshot indexes ([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190))
+- Fix aggs result of NestedAggregator with sub NestedAggregator ([#13324](https://github.com/opensearch-project/OpenSearch/pull/13324))
+- Fix fs info reporting negative available size ([#11573](https://github.com/opensearch-project/OpenSearch/pull/11573))
+- Add ListPitInfo::getKeepAlive() getter ([#14495](https://github.com/opensearch-project/OpenSearch/pull/14495))
+- Fix FuzzyQuery in keyword field will use IndexOrDocValuesQuery when both of index and doc_value are true ([#14378](https://github.com/opensearch-project/OpenSearch/pull/14378))
+- Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004))
+- Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552))
+- Fix create or update alias API doesn't throw exception for unsupported parameters ([#14719](https://github.com/opensearch-project/OpenSearch/pull/14719))
+- Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200))
+- Refactoring Grok.validatePatternBank by using an iterative approach ([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206))
+- Fix NPE when creating index with index.number_of_replicas set to null ([#14812](https://github.com/opensearch-project/OpenSearch/pull/14812))
+- Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722))
+- Fix bulk upsert ignores the default_pipeline and final_pipeline when auto-created index matches the index template ([#12891](https://github.com/opensearch-project/OpenSearch/pull/12891))
+- Fix NPE in ReplicaShardAllocator ([#14385](https://github.com/opensearch-project/OpenSearch/pull/14385))
+- Use circuit breaker in InternalHistogram when adding empty buckets ([#14754](https://github.com/opensearch-project/OpenSearch/pull/14754))
+- Create new IndexInput for multi part upload ([#14888](https://github.com/opensearch-project/OpenSearch/pull/14888))
+- Fix searchable snapshot failure with scripted fields ([#14411](https://github.com/opensearch-project/OpenSearch/pull/14411))

From 59302a3d5ea255be7f2bb72187b8df1f0aa33572 Mon Sep 17 00:00:00 2001
From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com>
Date: Fri, 26 Jul 2024 18:37:24 +0530
Subject: [PATCH 20/68] Fix version check after backport (#14985)

Signed-off-by: Mohit Godwani
---
 .../opensearch/cluster/metadata/ComposableIndexTemplate.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java
index 594dda83c41e2..63bbe4144c4fb 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java
@@ -184,7 +184,7 @@ public ComposableIndexTemplate(StreamInput in) throws IOException {
         this.version = in.readOptionalVLong();
         this.metadata = in.readMap();
         this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new);
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.V_2_16_0)) {
             this.context = in.readOptionalWriteable(Context::new);
         } else {
             this.context = null;
         }
@@ -248,7 +248,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalVLong(this.version);
         out.writeMap(this.metadata);
         out.writeOptionalWriteable(dataStreamTemplate);
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.V_2_16_0)) {
             out.writeOptionalWriteable(context);
         }
     }
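The version check fixed above is the standard wire-compatibility rule: once a field is backported, both the read and the write side must gate on the earliest version that actually serializes it, otherwise mixed 2.16/3.0 clusters disagree about the stream layout. A minimal sketch of that pattern follows, using only the stream APIs visible in the diff (`getVersion().onOrAfter`, `readOptionalWriteable`, `writeOptionalWriteable`); `ExampleMetadata` and its `Context` payload are illustrative stand-ins, not the real `ComposableIndexTemplate`.

```java
import java.io.IOException;

import org.opensearch.Version;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

// Sketch: an optional field that first ships in 2.16. Reader and writer gate
// on the same version constant, so peers older than 2.16 never see the field.
class ExampleMetadata implements Writeable {

    // Minimal optional payload for the sketch (hypothetical, not the real Context).
    static final class Context implements Writeable {
        private final String name;

        Context(StreamInput in) throws IOException {
            this.name = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
        }
    }

    private final Context context; // null when the peer stream is older than 2.16

    ExampleMetadata(StreamInput in) throws IOException {
        // Gating on V_3_0_0 here would break against 2.16 nodes that already
        // write the field, which is exactly the bug the patch above corrects.
        if (in.getVersion().onOrAfter(Version.V_2_16_0)) {
            this.context = in.readOptionalWriteable(Context::new);
        } else {
            this.context = null;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_2_16_0)) {
            out.writeOptionalWriteable(context);
        }
    }
}
```

The same rule explains why the check had to change in both the constructor and `writeTo`: an asymmetric gate would corrupt every field deserialized after `context` on one side of the connection.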
From d08c4253e18981d688253d20fb967e614923a957 Mon Sep 17 00:00:00 2001
From: Rahul Karajgikar <50844303+rahulkarajgikar@users.noreply.github.com>
Date: Mon, 29 Jul 2024 18:18:44 +0530
Subject: [PATCH 21/68] [Batch Fetch] Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (#14972)

* Fix for hasInitiatedFetching() in batch mode

Signed-off-by: Rahul Karajgikar
---
 CHANGELOG.md                                  |   1 +
 .../gateway/RecoveryFromGatewayIT.java        | 160 +++++++++++++++++-
 .../gateway/AsyncShardBatchFetch.java         |   8 +
 .../gateway/ReplicaShardBatchAllocator.java   |   2 +-
 .../gateway/ShardsBatchGatewayAllocator.java  |  31 +++-
 5 files changed, 191 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e88a084f7d7f6..d4c8c955bced4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ## [Unreleased 2.x]
 ### Added
+- Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs ([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972))
 ### Dependencies
 - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861))

diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
index 4085cc3890f30..eccc903dfac82 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
@@ -57,6 +57,7 @@ import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.ShardRoutingState;
 import org.opensearch.cluster.routing.UnassignedInfo;
+import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision;
 import org.opensearch.cluster.routing.allocation.AllocationDecision;
 import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator;
 import org.opensearch.cluster.service.ClusterService;
@@ -797,11 +798,26 @@ public void testBatchModeEnabledWithoutTimeout() throws Exception {
         );
         assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings()));
         assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches());
-        assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches());
+        // Replica shard would be marked ineligible since there are no data nodes.
+ // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); - // Now start both data nodes and ensure batch mode is working - logger.info("--> restarting the stopped nodes"); + // Now start one data node + logger.info("--> restarting the first stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + ensureStableCluster(2); + ensureYellow("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); + + // calling reroute and asserting on reroute response + logger.info("--> calling reroute while cluster is yellow"); + clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + // Now start last data node and ensure batch mode is working and cluster goes green + logger.info("--> restarting the second stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); ensureStableCluster(3); ensureGreen("test"); @@ -842,11 +858,26 @@ public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Ex ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. 
+ // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); - // Now start both data nodes and ensure batch mode is working - logger.info("--> restarting the stopped nodes"); + // Now start one data nodes and ensure batch mode is working + logger.info("--> restarting the first stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(0)).put(node0DataPathSettings).build()); + ensureStableCluster(2); + ensureYellow("test"); + assertEquals(0, gatewayAllocator.getNumberOfStartedShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); + assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); + + // calling reroute and asserting on reroute response + logger.info("--> calling reroute while cluster is yellow"); + clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertTrue(clusterRerouteResponse.isAcknowledged()); + + // Now start last data node and ensure batch mode is working and cluster goes green + logger.info("--> restarting the second stopped node"); internalCluster().startDataOnlyNode(Settings.builder().put("node.name", dataOnlyNodes.get(1)).put(node1DataPathSettings).build()); ensureStableCluster(3); ensureGreen("test"); @@ -907,7 +938,9 @@ public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(10, gatewayAllocator.getNumberOfStoreShardBatches()); + // All replica shards would be marked ineligible since there are no data nodes. 
+ // They would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); assertEquals(RED, health.getStatus()); @@ -1051,6 +1084,18 @@ public void testMultipleReplicaShardAssignmentWithDelayedAllocationAndDifferentN ensureGreen("test"); } + public void testAllocationExplainReturnsNoWhenExtraReplicaShardInNonBatchMode() throws Exception { + // Non batch mode - This test is to validate that we don't return AWAITING_INFO in allocation explain API when the deciders are + // returning NO + this.allocationExplainReturnsNoWhenExtraReplicaShard(false); + } + + public void testAllocationExplainReturnsNoWhenExtraReplicaShardInBatchMode() throws Exception { + // Batch mode - This test is to validate that we don't return AWAITING_INFO in allocation explain API when the deciders are + // returning NO + this.allocationExplainReturnsNoWhenExtraReplicaShard(true); + } + public void testNBatchesCreationAndAssignment() throws Exception { // we will reduce batch size to 5 to make sure we have enough batches to test assignment // Total number of primary shards = 50 (50 indices*1) @@ -1104,7 +1149,9 @@ public void testNBatchesCreationAndAssignment() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(10, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(10, gatewayAllocator.getNumberOfStoreShardBatches()); + // All replica shards would be marked ineligible since there are no data nodes. + // They would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); assertEquals(RED, health.getStatus()); @@ -1193,7 +1240,9 @@ public void testCulpritShardInBatch() throws Exception { ); assertTrue(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(internalCluster().clusterService().getSettings())); assertEquals(1, gatewayAllocator.getNumberOfStartedShardBatches()); - assertEquals(1, gatewayAllocator.getNumberOfStoreShardBatches()); + // Replica shard would be marked ineligible since there are no data nodes. 
+ // It would then be removed from any batch and batches would get deleted, so we would have 0 replica batches + assertEquals(0, gatewayAllocator.getNumberOfStoreShardBatches()); assertTrue(clusterRerouteResponse.isAcknowledged()); health = client(internalCluster().getClusterManagerName()).admin().cluster().health(Requests.clusterHealthRequest()).actionGet(); assertFalse(health.isTimedOut()); @@ -1511,4 +1560,97 @@ private List findNodesWithShard(final boolean primary) { Collections.shuffle(requiredStartedShards, random()); return requiredStartedShards.stream().map(shard -> state.nodes().get(shard.currentNodeId()).getName()).collect(Collectors.toList()); } + + private void allocationExplainReturnsNoWhenExtraReplicaShard(boolean batchModeEnabled) throws Exception { + internalCluster().startClusterManagerOnlyNodes( + 1, + Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), batchModeEnabled).build() + ); + internalCluster().startDataOnlyNodes(5); + createIndex( + "test", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 4).build() + ); + ensureGreen("test"); + ensureStableCluster(6); + + // Stop one of the nodes to make the cluster yellow + // We cannot directly create an index with replica = data node count because then the whole flow will get skipped due to + // INDEX_CREATED + List nodesWithReplicaShards = findNodesWithShard(false); + Settings replicaNodeDataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(0)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(0))); + + ensureStableCluster(5); + ensureYellow("test"); + + logger.info("--> calling allocation explain API"); + // shard should have decision NO because there is no valid node for the extra replica to go to + AllocateUnassignedDecision aud = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + // Now creating a new index with too many replicas and trying again + createIndex( + "test2", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 5).build() + ); + + ensureYellowAndNoInitializingShards("test2"); + + logger.info("--> calling allocation explain API again"); + // shard should have decision NO because there are 6 replicas and 4 data nodes + aud = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test2") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + logger.info("--> restarting the stopped node"); + internalCluster().startDataOnlyNode( + Settings.builder().put("node.name", nodesWithReplicaShards.get(0)).put(replicaNodeDataPathSettings).build() + ); + + ensureStableCluster(6); + ensureGreen("test"); + + logger.info("--> calling allocation explain API 3rd time"); + // shard should still have decision NO because there are 6 replicas and 5 data nodes + aud = client().admin() + .cluster() 
+ .prepareAllocationExplain() + .setIndex("test2") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation() + .getShardAllocationDecision() + .getAllocateDecision(); + + assertEquals(AllocationDecision.NO, aud.getAllocationDecision()); + assertEquals("cannot allocate because allocation is not permitted to any of the nodes", aud.getExplanation()); + + internalCluster().startDataOnlyNodes(1); + + ensureStableCluster(7); + ensureGreen("test2"); + } } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java index 4f39a39cea678..df642a9f5a743 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java @@ -80,6 +80,14 @@ public synchronized void clearShard(ShardId shardId) { this.cache.deleteShard(shardId); } + public boolean hasEmptyCache() { + return this.cache.getCache().isEmpty(); + } + + public AsyncShardFetchCache getCache() { + return this.cache; + } + /** * Cache implementation of transport actions returning batch of shards related data in the response. * Store node level responses of transport actions like {@link TransportNodesListGatewayStartedShardsBatch} or diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index 7c75f2a5d1a8f..0818b187271cb 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -183,7 +183,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( if (allocationDecision.type() != Decision.Type.YES && (!explain || !hasInitiatedFetching(shardRouting))) { // only return early if we are not in explain mode, or we are in explain mode but we have not // yet attempted to fetch any shard data - logger.trace("{}: ignoring allocation, can't be allocated on any node", shardRouting); + logger.trace("{}: ignoring allocation, can't be allocated on any node. Decision: {}", shardRouting, allocationDecision.type()); return AllocateUnassignedDecision.no( UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.type()), result.v2() != null ? new ArrayList<>(result.v2().values()) : null diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 55f5388d8f454..673ed8dbaa1c3 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -576,8 +576,37 @@ protected AsyncShardFetch.FetchResult Date: Mon, 29 Jul 2024 10:26:49 -0400 Subject: [PATCH 22/68] Bump com.microsoft.azure:msal4j from 1.16.1 to 1.16.2 in /plugins/repository-azure (#14995) * Bump com.microsoft.azure:msal4j in /plugins/repository-azure Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.16.1 to 1.16.2. 
- [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases) - [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt) - [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/compare/v1.16.1...v1.16.2) --- updated-dependencies: - dependency-name: com.microsoft.azure:msal4j dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 | 1 - plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d4c8c955bced4..138efce1c29e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) +- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) ### Changed diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 7bd7be1481a2f..15e3158f2dbc4 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -61,7 +61,7 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.16.1' + api 'com.microsoft.azure:msal4j:1.16.2' api 'com.nimbusds:oauth2-oidc-sdk:11.9.1' api 'com.nimbusds:nimbus-jose-jwt:9.40' api 'com.nimbusds:content-type:2.3' diff --git a/plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 deleted file mode 100644 index 7d24922196be4..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ad89b4632ef9abab883114e77c079843a206862 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 new file mode 100644 index 0000000000000..1363e5a0793d2 --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 @@ -0,0 +1 @@ +b43ec4dd657f8ed5922bc0a8ccbe49000968bd15 \ No newline at end of file From 122f3f0ab7448f06a20b46919c6e23e74ce1fa9c Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Mon, 29 Jul 2024 20:20:26 +0530 Subject: [PATCH 23/68] Cache index shard limit to optimise ShardLimitsAllocationDecider (#14962) * Cache index shard limit per node Signed-off-by: Rishab Nahata --- .../routing/allocation/RerouteBenchmark.java | 135 ++++++++++++++++++ .../cluster/metadata/IndexMetadata.java | 16 ++- .../decider/ShardsLimitAllocationDecider.java | 4 +- 3 files changed, 150 insertions(+), 5 deletions(-) create mode 100644 
benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java new file mode 100644 index 0000000000000..e54bca579423b --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/RerouteBenchmark.java @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.benchmark.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.common.logging.LogConfigurator; +import org.opensearch.common.settings.Settings; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; + +@Fork(1) +@Warmup(iterations = 3) +@Measurement(iterations = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +@SuppressWarnings("unused") // invoked by benchmarking framework +public class RerouteBenchmark { + @Param({ + // indices| nodes + " 10000| 500|", }) + public String indicesNodes = "1|1"; + public int numIndices; + public int numNodes; + public int numShards = 10; + public int numReplicas = 1; + + private AllocationService allocationService; + private ClusterState initialClusterState; + + @Setup + public void setUp() throws Exception { + LogConfigurator.setNodeName("test"); + final String[] params = indicesNodes.split("\\|"); + numIndices = toInt(params[0]); + numNodes = toInt(params[1]); + + int totalShardCount = (numReplicas + 1) * numShards * numIndices; + Metadata.Builder mb = Metadata.builder(); + for (int i = 1; i <= numIndices; i++) { + mb.put( + IndexMetadata.builder("test_" + i) + .settings(Settings.builder().put("index.version.created", Version.CURRENT)) + .numberOfShards(numShards) + .numberOfReplicas(numReplicas) + ); + } + + Metadata metadata = mb.build(); + RoutingTable.Builder rb = RoutingTable.builder(); + for (int i = 1; i <= numIndices; i++) { + rb.addAsNew(metadata.index("test_" + i)); + } + RoutingTable routingTable = rb.build(); + initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(setUpClusterNodes(numNodes)) + .build(); + } + + @Benchmark 
+ public ClusterState measureShardAllocationEmptyCluster() throws Exception { + ClusterState clusterState = initialClusterState; + allocationService = Allocators.createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.load_awareness.provisioned_capacity", numNodes) + .put("cluster.routing.allocation.load_awareness.skew_factor", "50") + .put("cluster.routing.allocation.node_concurrent_recoveries", "2") + .build() + ); + clusterState = allocationService.reroute(clusterState, "reroute"); + while (clusterState.getRoutingNodes().hasUnassignedShards()) { + clusterState = startInitializingShardsAndReroute(allocationService, clusterState); + } + return clusterState; + } + + private int toInt(String v) { + return Integer.valueOf(v.trim()); + } + + private DiscoveryNodes.Builder setUpClusterNodes(int nodes) { + DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); + for (int i = 1; i <= nodes; i++) { + Map attributes = new HashMap<>(); + attributes.put("zone", "zone_" + (i % 3)); + nb.add(Allocators.newNode("node_0_" + i, attributes)); + } + return nb; + } + + private static ClusterState startInitializingShardsAndReroute(AllocationService allocationService, ClusterState clusterState) { + return startShardsAndReroute(allocationService, clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); + } + + private static ClusterState startShardsAndReroute( + AllocationService allocationService, + ClusterState clusterState, + List initializingShards + ) { + return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting"); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 9e7fe23f29872..df0d2609ad83d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.allocation.IndexMetadataUpdater; +import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; @@ -686,6 +687,8 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final boolean isSystem; private final boolean isRemoteSnapshot; + private final int indexTotalShardsPerNodeLimit; + private IndexMetadata( final Index index, final long version, @@ -711,7 +714,8 @@ private IndexMetadata( final int routingPartitionSize, final ActiveShardCount waitForActiveShards, final Map rolloverInfos, - final boolean isSystem + final boolean isSystem, + final int indexTotalShardsPerNodeLimit ) { this.index = index; @@ -746,6 +750,7 @@ private IndexMetadata( this.rolloverInfos = Collections.unmodifiableMap(rolloverInfos); this.isSystem = isSystem; this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); + this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -899,6 +904,10 @@ public Set inSyncAllocationIds(int shardId) { return inSyncAllocationIds.get(shardId); } + 
public int getIndexTotalShardsPerNodeLimit() { + return this.indexTotalShardsPerNodeLimit; + } + @Nullable public DiscoveryNodeFilters requireFilters() { return requireFilters; @@ -1583,6 +1592,8 @@ public IndexMetadata build() { ); } + final int indexTotalShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetadata( @@ -1610,7 +1621,8 @@ public IndexMetadata build() { routingPartitionSize, waitForActiveShards, rolloverInfos, - isSystem + isSystem, + indexTotalShardsPerNodeLimit ); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index c008102554e8c..6f211f370de95 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -125,8 +124,7 @@ private Decision doDecide( RoutingAllocation allocation, BiPredicate decider ) { - IndexMetadata indexMd = allocation.metadata().getIndexSafe(shardRouting.index()); - final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings); + final int indexShardLimit = allocation.metadata().getIndexSafe(shardRouting.index()).getIndexTotalShardsPerNodeLimit(); // Capture the limit here in case it changes during this method's // execution final int clusterShardLimit = this.clusterShardLimit; From 691f78ca480588df3d27dcad96601b22e77b6386 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 29 Jul 2024 11:50:29 -0400 Subject: [PATCH 24/68] OpenJDK Update (July 2024 Patch releases) (#14998) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++-- buildSrc/version.properties | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 138efce1c29e7..1b21fded97e47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) +- OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) - Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) ### Changed diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index b2b3e3003e572..8d5ce9143cbac 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "21.0.3+9"; + private static final String 
SYSTEM_JDK_VERSION = "21.0.4+7"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "21.0.3+9"; + private static final String GRADLE_JDK_VERSION = "21.0.4+7"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 855ccc1f87413..7d32ed3df7b76 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,7 +2,7 @@ opensearch = 3.0.0 lucene = 9.12.0-snapshot-847316d bundled_jdk_vendor = adoptium -bundled_jdk = 21.0.3+9 +bundled_jdk = 21.0.4+7 # optional dependencies spatial4j = 0.7 From f5b0ebaab8632932faa362caffa412b7eb6eb23a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:03:23 -0500 Subject: [PATCH 25/68] Bump actions/github-script from 6 to 7 (#14997) * Bump actions/github-script from 6 to 7 Bumps [actions/github-script](https://github.com/actions/github-script) from 6 to 7. - [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/v6...v7) --- updated-dependencies: - dependency-name: actions/github-script dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Daniel (dB.) Doubrovkine Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Daniel (dB.) Doubrovkine --- .github/workflows/add-performance-comment.yml | 2 +- .github/workflows/benchmark-pull-request.yml | 6 +++--- .github/workflows/maintainer-approval.yml | 2 +- .github/workflows/triage.yml | 2 +- .github/workflows/version.yml | 2 +- CHANGELOG.md | 1 + 6 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/add-performance-comment.yml b/.github/workflows/add-performance-comment.yml index fc272714c5628..6a310bff4c0a1 100644 --- a/.github/workflows/add-performance-comment.yml +++ b/.github/workflows/add-performance-comment.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Add comment to PR - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{secrets.GITHUB_TOKEN}} script: | diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 47abcc1178572..98dd39b1dad54 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -25,7 +25,7 @@ jobs: echo "USER_TAGS=pull_request_number:${{ github.event.issue.number }},repository:OpenSearch" >> $GITHUB_ENV - name: Check comment format id: check_comment - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); @@ -62,7 +62,7 @@ jobs: } - name: Post invalid format comment if: steps.check_comment.outputs.invalid == 'true' - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{secrets.GITHUB_TOKEN}} script: | @@ -150,7 +150,7 @@ jobs: cat $GITHUB_ENV bash opensearch-build/scripts/benchmark/benchmark-pull-request.sh ${{ secrets.JENKINS_PR_BENCHMARK_GENERIC_WEBHOOK_TOKEN }} - name: Update PR with Job Url - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | 
diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml index fdc2bf16937b4..34e8f57cc1878 100644 --- a/.github/workflows/maintainer-approval.yml +++ b/.github/workflows/maintainer-approval.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - id: find-maintainers - uses: actions/github-script@v7.0.1 + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} result-encoding: string diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml index 83bf4926a8c2d..c305818bdb0a9 100644 --- a/.github/workflows/triage.yml +++ b/.github/workflows/triage.yml @@ -9,7 +9,7 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/github-script@v7.0.1 + - uses: actions/github-script@v7 with: script: | const { issue, repository } = context.payload; diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 7f120b65d7c2e..2de54716256ff 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -129,7 +129,7 @@ jobs: - name: Create tracking issue id: create-issue - uses: actions/github-script@v7.0.1 + uses: actions/github-script@v7 with: script: | const body = ` diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b21fded97e47..7cc918b2ac089 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) - Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) +- Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) ### Changed From e26608b1492a8c1dcc61b8f3965564a40b3c0401 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Mon, 29 Jul 2024 13:43:22 -0700 Subject: [PATCH 26/68] [Derived Fields] Add aggregation support for derived fields (#14618) * Add aggregation support for derived fields Signed-off-by: Marc Handalian * add unit test for a terms agg with derived fields Signed-off-by: Marc Handalian * Fix license header and add changelog entry Signed-off-by: Marc Handalian * move matrix_stats tests to aggs-matrix-stats module Signed-off-by: Marc Handalian * Move matrix tests back and add dependency to painless module Signed-off-by: Marc Handalian * add tests for all aggregations types and support ip_range Signed-off-by: Marc Handalian * Add tests for agg script returned from DerivedFieldType Signed-off-by: Marc Handalian * remove children aggs test as its not yet supported Signed-off-by: Marc Handalian * Add more tests Signed-off-by: Marc Handalian * fix changelog Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + modules/lang-painless/build.gradle | 1 + .../derived_fields/60_derived_field_aggs.yml | 1521 +++++++++++++++++ .../index/mapper/DerivedFieldType.java | 75 +- .../index/mapper/ObjectDerivedFieldType.java | 31 +- .../support/ValuesSourceConfig.java | 9 +- .../support/values/ScriptBytesValues.java | 7 +- .../index/mapper/DerivedFieldTypeTests.java | 69 + .../terms/DerivedFieldAggregationTests.java | 146 ++ .../support/ValuesSourceConfigTests.java | 39 + 10 files changed, 1889 insertions(+), 10 deletions(-) create mode 100644 
modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 7cc918b2ac089..36cd33cc40453 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) +- Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) ### Dependencies - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index fb51a0bb7f157..7b828109139c8 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -46,6 +46,7 @@ ext { testClusters.all { module ':modules:mapper-extras' + module ':modules:aggs-matrix-stats' systemProperty 'opensearch.scripting.update.ctx_in_params', 'false' // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 systemProperty 'opensearch.transport.cname_in_publish_address', 'true' diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml new file mode 100644 index 0000000000000..ba879a5fd73c3 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml @@ -0,0 +1,1521 @@ +--- +setup: +- skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + +# -- NOT SUPPORTED: -- +# geobounds +# scripted metric +# -- NOT SUPPORTED: -- +# Any geo agg +# sig terms/text + +- do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + os: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_os: + type: keyword + script: "emit(params._source[\"os\"])" + derived_long: + type: long + script: "emit(params._source[\"long\"])" + derived_float: + type: float + script: "emit(params._source[\"float\"])" + derived_double: + type: double + script: "emit(params._source[\"double\"])" + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + derived_geo: + type: geo_point + script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + derived_ip: + type: ip + script: "emit(params._source[\"ip\"])" + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + derived_array_of_long: + type: long + script: 
"emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + derived_object: + type: object + properties: + keyword: keyword + ip: ip + os: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + +- do: + bulk: + refresh: true + body: + - index: + _index: test + _id: 1 + - text: "peter piper" + keyword: "foo" + os: "mac" + long: 1 + float: 1.0 + double: 1.0 + date: "2017-01-01T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.1" + boolean: true + array_of_long: [ 1, 2 ] + json_field: "{\"text\":\"peter piper\",\"keyword\":\"foo\",\"os\":\"mac\",\"long\":1,\"float\":1.0,\"double\":1.0,\"date\":\"2017-01-01T00:00:00Z\",\"ip\":\"192.168.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}" + - index: + _index: test + _id: 2 + - text: "piper picked a peck" + keyword: "bar" + os: "windows" + long: 2 + float: 2.0 + double: 2.0 + date: "2017-01-02T00:00:00Z" + geo: [ -118.2437, 34.0522 ] + ip: "10.0.0.1" + boolean: false + array_of_long: [ 2, 3 ] + json_field: "{\"keyword\":\"bar\",\"long\":2,\"float\":2.0,\"os\":\"windows\",\"double\":2.0,\"date\":\"2017-01-02T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":false, \"array_of_long\": [2, 3]}" + - index: + _index: test + _id: 3 + - text: "peck of pickled peppers" + keyword: "baz" + os: "mac" + long: -3 + float: -3.0 + double: -3.0 + date: "2017-01-03T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "172.16.0.1" + boolean: true + array_of_long: [ 3, 4 ] + json_field: "{\"keyword\":\"baz\",\"long\":-3,\"float\":-3.0,\"os\":\"mac\",\"double\":-3.0,\"date\":\"2017-01-03T00:00:00Z\",\"ip\":\"172.16.0.1\",\"boolean\":true, \"array_of_long\": [3, 4]}" + - index: + _index: test + _id: 4 + - text: "pickled peppers" + keyword: "qux" + os: "windows" + long: 4 + float: 4.0 + double: 4.0 + date: "2017-01-04T00:00:00Z" + geo: [ -74.0060, 40.7128 ] + ip: "192.168.0.2" + boolean: false + array_of_long: [ 4, 5 ] + json_field: "{\"keyword\":\"qux\",\"long\":4,\"float\":4.0,\"os\":\"windows\",\"double\":4.0,\"date\":\"2017-01-04T00:00:00Z\",\"ip\":\"192.168.0.2\",\"boolean\":false, \"array_of_long\": [4, 5]}" + - index: + _index: test + _id: 5 + - text: "peppers" + keyword: "quux" + os: "mac" + long: 5 + float: 5.0 + double: 5.0 + date: "2017-01-05T00:00:00Z" + geo: [ -87.6298, 41.87 ] + ip: "10.0.0.2" + boolean: true + array_of_long: [ 5, 6 ] + json_field: "{\"keyword\":\"quux\",\"long\":5,\"float\":5.0,\"os\":\"mac\",\"double\":5.0,\"date\":\"2017-01-05T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":true, \"array_of_long\": [5, 6]}" + +- do: + indices.refresh: + index: [test] + +### BUCKET AGGS +--- +"Test terms aggregation on derived_keyword from search definition": +- do: + search: + index: test + body: + derived: + derived_keyword_search_definition: + type: keyword + script: "emit(params._source[\"keyword\"])" + size: 0 + aggs: + keywords: + terms: + field: derived_keyword_search_definition + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { 
aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test terms aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + 
true_values: + term: + derived_boolean: true + false_values: + term: + derived_boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_long: + gte: 3 + low_num: + range: + derived_long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +### METRIC AGGS + +--- +"Test stats aggregation on derived_array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_double": +- do: + search: + index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- match: { aggregations.double_percentiles.values.25\.0: 1.0 } +- match: { aggregations.double_percentiles.values.50\.0: 2.0 } +- match: { aggregations.double_percentiles.values.75\.0: 4.0 } + +--- +"Test percentile ranks aggregation on derived_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- match: { aggregations.long_percentile_ranks.values.2\.0: 50.0 } +- match: { aggregations.long_percentile_ranks.values.4\.0: 70.0 } + +--- +"Test top hits aggregation on derived_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_long, derived_float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_float" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_long" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_long": +- do: + search: + index: 
test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +## Pipeline agg +--- +"Test simple pipeline agg with derived_keyword and long": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_keyword + aggs: + sum_derived_longs: + sum: + field: derived_long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test terms aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +--- +"Test composite agg": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_os + - keyword: + terms: + field: derived_keyword + - is_true: + terms: + field: derived_boolean + aggs: + avg_long: + avg: + field: derived_long +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { 
aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + rare_terms: + field: derived_keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation": +- do: + search: + index: test + body: + query: + term: + derived_keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test missing aggregation": +- do: + 
search: + index: test + body: + size: 0 + aggs: + missing_agg: + missing: + field: derived_keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_agg.doc_count: 0 } + +--- +"Test value_count aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test weighted_avg aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + weighted_avg_agg: + weighted_avg: + value: + field: derived_long + weight: + field: derived_float + +- match: { hits.total.value: 5 } +- is_true: aggregations.weighted_avg_agg.value + +--- +"Test diversified_sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + diversified_sampler_agg: + diversified_sampler: + field: derived_keyword + max_docs_per_value: 1 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- match: { aggregations.diversified_sampler_agg.doc_count: 5 } +- match: { aggregations.diversified_sampler_agg.avg_long.value: 1.8 } + +--- +"Test sampler aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + sampler_agg: + sampler: + shard_size: 2 + aggs: + avg_long: + avg: + field: derived_long + +- match: { hits.total.value: 5 } +- is_true: aggregations.sampler_agg.doc_count +- is_true: aggregations.sampler_agg.avg_long.value + +--- +"Test multi_terms aggregation": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_keyword + - field: derived_os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + +#### SAME TESTS WITH DERIVED_OBJECT +--- +"Test terms aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- length: { aggregations.keywords.buckets: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.doc_count: 1 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.doc_count: 1 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.doc_count: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.doc_count: 1 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.doc_count: 1 } + +--- +"Test range aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_ranges: + range: + field: derived_object.long + ranges: + - to: 0 + - from: 0 + to: 3 + - from: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.long_ranges.buckets: 3 } +- match: { aggregations.long_ranges.buckets.0.doc_count: 1 } +- match: { aggregations.long_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.long_ranges.buckets.2.doc_count: 2 } + +--- +"Test histogram aggregation on derived_object.float": +- do: + search: + index: test + body: + size: 0 + aggs: + float_histogram: + histogram: + field: derived_object.float + interval: 2 + +- match: { hits.total.value: 5 } +- length: { aggregations.float_histogram.buckets: 5 } +- match: { aggregations.float_histogram.buckets.0.key: -4.0 } +- match: { aggregations.float_histogram.buckets.0.doc_count: 1 } + +--- +"Test date_histogram 
aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: derived_object.date + calendar_interval: day + +- match: { hits.total.value: 5 } +- length: { aggregations.date_histogram.buckets: 5 } +- match: { aggregations.date_histogram.buckets.0.key_as_string: "2017-01-01T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.0.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.1.key_as_string: "2017-01-02T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.1.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.2.key_as_string: "2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.2.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.3.key_as_string: "2017-01-04T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.3.doc_count: 1 } +- match: { aggregations.date_histogram.buckets.4.key_as_string: "2017-01-05T00:00:00.000Z" } +- match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + +--- +"Test date_range aggregation on derived_object.date": +- do: + search: + index: test + body: + size: 0 + aggs: + date_range: + date_range: + field: derived_object.date + ranges: + - to: "2017-01-03T00:00:00Z" + - from: "2017-01-03T00:00:00Z" + +- match: { hits.total.value: 5 } +- match: { aggregations.date_range.buckets.0.key: "*-2017-01-03T00:00:00.000Z" } +- match: { aggregations.date_range.buckets.0.doc_count: 2 } +- match: { aggregations.date_range.buckets.1.key: "2017-01-03T00:00:00.000Z-*" } +- match: { aggregations.date_range.buckets.1.doc_count: 3 } + +--- +"Test filters aggregation on derived_object.boolean": +- do: + search: + index: test + body: + size: 0 + aggs: + boolean_filters: + filters: + filters: + true_values: + term: + derived_object.boolean: true + false_values: + term: + derived_object.boolean: false + +- match: { hits.total.value: 5 } +- match: { aggregations.boolean_filters.buckets.true_values.doc_count: 3 } +- match: { aggregations.boolean_filters.buckets.false_values.doc_count: 2 } + +--- +"Test adjacency matrix aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + adj_matrix: + adjacency_matrix: + filters: + high_num: + range: + derived_object.long: + gte: 3 + low_num: + range: + derived_object.long: + lt: 3 +- match: { hits.total.value: 5 } +- length: { aggregations.adj_matrix.buckets: 2 } +- match: { aggregations.adj_matrix.buckets.0.key: "high_num" } +- match: { aggregations.adj_matrix.buckets.0.doc_count: 2 } +- match: { aggregations.adj_matrix.buckets.1.key: "low_num" } +- match: { aggregations.adj_matrix.buckets.1.doc_count: 3 } + +--- +"Test stats aggregation on derived_object.array_of_long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_array_stats: + stats: + field: derived_object.array_of_long + +- match: { hits.total.value: 5 } +- match: { aggregations.long_array_stats.count: 10 } +- match: { aggregations.long_array_stats.min: 1 } +- match: { aggregations.long_array_stats.max: 6 } +- match: { aggregations.long_array_stats.avg: 3.5 } +- match: { aggregations.long_array_stats.sum: 35 } + +--- +"Test cardinality aggregation on derived_object_keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_keywords: + cardinality: + field: derived_object.keyword + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_keywords.value: 5 } + +--- +"Test percentiles aggregation on derived_object.double": +- do: + search: + 
index: test + body: + size: 0 + aggs: + double_percentiles: + percentiles: + field: derived_object.double + percents: [ 25, 50, 75 ] + +- match: { hits.total.value: 5 } +- match: { aggregations.double_percentiles.values.25\.0: 1.0 } +- match: { aggregations.double_percentiles.values.50\.0: 2.0 } +- match: { aggregations.double_percentiles.values.75\.0: 4.0 } + +--- +"Test percentile ranks aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + long_percentile_ranks: + percentile_ranks: + field: derived_object.long + values: [ 2, 4 ] + +- match: { hits.total.value: 5 } +- match: { aggregations.long_percentile_ranks.values.2\.0: 50.0 } +- match: { aggregations.long_percentile_ranks.values.4\.0: 70.0 } + +--- +"Test top hits aggregation on derived_object.keyword": +- do: + search: + index: test + body: + size: 0 + aggs: + top_keywords: + terms: + field: derived_object.keyword + aggs: + top_hits: + top_hits: + size: 1 +- match: { hits.total.value: 5 } +- length: { aggregations.top_keywords.buckets: 5 } +- match: { aggregations.top_keywords.buckets.0.key: "bar" } +- match: { aggregations.top_keywords.buckets.0.doc_count: 1 } +- length: { aggregations.top_keywords.buckets.0.top_hits.hits.hits: 1 } + +--- +"Test matrix stats aggregation on derived_object.long and float": +- do: + search: + index: test + body: + size: 0 + aggs: + matrix_stats: + matrix_stats: + fields: [ derived_object.long, derived_object.float ] +- match: { hits.total.value: 5 } +- length: { aggregations.matrix_stats.fields: 2 } +- match: { aggregations.matrix_stats.fields.0.name: "derived_object.long" } +- match: { aggregations.matrix_stats.fields.0.count: 5 } +- match: { aggregations.matrix_stats.fields.1.name: "derived_object.float" } +- match: { aggregations.matrix_stats.fields.1.count: 5 } + +--- +"Test median absolute deviation aggregation on derived_object.long": +- do: + search: + index: test + body: + size: 0 + aggs: + mad_long: + median_absolute_deviation: + field: derived_object.long +- match: { hits.total.value: 5 } +- match: { aggregations.mad_long.value: 2.0 } + +--- +"Test simple pipeline agg derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + keywords: + terms: + field: derived_object.keyword + aggs: + sum_derived_longs: + sum: + field: derived_object.long + sum_total: + sum_bucket: + buckets_path: "keywords>sum_derived_longs" +- match: { hits.total.value: 5 } +- match: { aggregations.keywords.buckets.0.key: "bar" } +- match: { aggregations.keywords.buckets.0.sum_derived_longs.value: 2 } +- match: { aggregations.keywords.buckets.1.key: "baz" } +- match: { aggregations.keywords.buckets.1.sum_derived_longs.value: -3 } +- match: { aggregations.keywords.buckets.2.key: "foo" } +- match: { aggregations.keywords.buckets.2.sum_derived_longs.value: 1 } +- match: { aggregations.keywords.buckets.3.key: "quux" } +- match: { aggregations.keywords.buckets.3.sum_derived_longs.value: 5 } +- match: { aggregations.keywords.buckets.4.key: "qux" } +- match: { aggregations.keywords.buckets.4.sum_derived_longs.value: 4 } +- match: { aggregations.sum_total.value: 9 } + + +--- +"Test composite agg on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + test_composite_agg: + composite: + size: 10 + sources: + - os: + terms: + field: derived_object.os + - keyword: + terms: + field: derived_object.keyword + - is_true: + terms: + field: derived_object.boolean + aggs: + avg_long: + avg: + field: derived_object.long +- length: { 
aggregations.test_composite_agg.buckets: 5 } +- match: { aggregations.test_composite_agg.buckets.0.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.0.key.keyword: "baz" } +- match: { aggregations.test_composite_agg.buckets.0.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.0.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.0.avg_long.value: -3.0 } +- match: { aggregations.test_composite_agg.buckets.1.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.1.key.keyword: "foo" } +- match: { aggregations.test_composite_agg.buckets.1.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.1.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.1.avg_long.value: 1.0 } +- match: { aggregations.test_composite_agg.buckets.2.key.os: "mac" } +- match: { aggregations.test_composite_agg.buckets.2.key.keyword: "quux" } +- match: { aggregations.test_composite_agg.buckets.2.key.is_true: true } +- match: { aggregations.test_composite_agg.buckets.2.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.2.avg_long.value: 5.0 } +- match: { aggregations.test_composite_agg.buckets.3.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.3.key.keyword: "bar" } +- match: { aggregations.test_composite_agg.buckets.3.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.3.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.3.avg_long.value: 2.0 } +- match: { aggregations.test_composite_agg.buckets.4.key.os: "windows" } +- match: { aggregations.test_composite_agg.buckets.4.key.keyword: "qux" } +- match: { aggregations.test_composite_agg.buckets.4.key.is_true: false } +- match: { aggregations.test_composite_agg.buckets.4.doc_count: 1 } +- match: { aggregations.test_composite_agg.buckets.4.avg_long.value: 4.0 } + +--- +"Test auto date histogram on derived_object": +- do: + search: + rest_total_hits_as_int: true + index: test + body: + size: 0 + aggs: + test_auto_date_histogram: + auto_date_histogram: + field: "derived_object.date" + buckets: 10 + format: "yyyy-MM-dd" + aggs: + avg_long: + avg: + field: derived_object.long +- match: { hits.total: 5 } +- length: { aggregations.test_auto_date_histogram.buckets: 9 } +- match: { aggregations.test_auto_date_histogram.buckets.0.key_as_string: "2017-01-01"} +- match: { aggregations.test_auto_date_histogram.buckets.0.avg_long.value: 1.0} + +--- +"Test variable_width_histogram aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + var_width_hist: + variable_width_histogram: + field: derived_object.long + buckets: 3 + +- match: { hits.total.value: 5 } +- length: { aggregations.var_width_hist.buckets: 3 } + +--- +"Test extended_stats aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + extended_stats_agg: + extended_stats: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.extended_stats_agg.count: 5 } +- match: { aggregations.extended_stats_agg.min: -3 } +- match: { aggregations.extended_stats_agg.max: 5 } +- is_true: aggregations.extended_stats_agg.avg +- is_true: aggregations.extended_stats_agg.sum +- is_true: aggregations.extended_stats_agg.sum_of_squares +- is_true: aggregations.extended_stats_agg.variance +- is_true: aggregations.extended_stats_agg.std_deviation + +--- +"Test rare_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + rare_terms_agg: + 
rare_terms: + field: derived_object.keyword + max_doc_count: 1 + +- match: { hits.total.value: 5 } +- length: { aggregations.rare_terms_agg.buckets: 5 } + +--- +"Test global aggregation on derived_object": +- do: + search: + index: test + body: + query: + term: + derived_object.keyword: "foo" + aggs: + all_docs: + global: {} + aggs: + avg_long: + avg: + field: derived_object.long + +- match: { hits.total.value: 1 } +- match: { aggregations.all_docs.doc_count: 5 } +- match: { aggregations.all_docs.avg_long.value: 1.8 } + +--- +"Test value_count aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + value_count_agg: + value_count: + field: derived_object.long + +- match: { hits.total.value: 5 } +- match: { aggregations.value_count_agg.value: 5 } + +--- +"Test multi_terms aggregation on derived_object": +- do: + search: + index: test + body: + size: 0 + aggs: + multi_terms_agg: + multi_terms: + terms: + - field: derived_object.keyword + - field: derived_object.os + size: 10 + +- match: { hits.total.value: 5 } +- length: { aggregations.multi_terms_agg.buckets: 5 } + + +### IP specific tests +--- +"Test terms aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_terms: + terms: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_terms.buckets: 5 } +- match: { aggregations.ip_terms.buckets.0.key: "10.0.0.1" } +- match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + +--- +"Test range aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_ranges: + ip_range: + field: derived_object.ip + ranges: + - to: "10.0.0.0" + - from: "10.0.0.0" + to: "172.16.0.0" + - from: "172.16.0.0" + +- match: { hits.total.value: 5 } +- length: { aggregations.ip_ranges.buckets: 3 } +- match: { aggregations.ip_ranges.buckets.0.doc_count: 0 } +- match: { aggregations.ip_ranges.buckets.1.doc_count: 2 } +- match: { aggregations.ip_ranges.buckets.2.doc_count: 3 } + +--- +"Test cardinality aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + unique_ips: + cardinality: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.unique_ips.value: 5 } + +--- +"Test missing aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + missing_ips: + missing: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.missing_ips.doc_count: 0 } + +--- +"Test value count aggregation on derived_object_ip": +- do: + search: + index: test + body: + size: 0 + aggs: + ip_count: + value_count: + field: derived_object.ip + +- match: { hits.total.value: 5 } +- match: { aggregations.ip_count.value: 5 } + +### TEST UNSUPPORTED AGG TYPES +--- +"Test sig terms not supported": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_os: + significant_terms: + field: "derived_os" + min_doc_count: 1 + size: 10 + +--- +"Test significant text": +- do: + catch: /illegal_argument_exception/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + terms: + derived_keyword: ["foo"] + aggs: + significant_words: + significant_text: + field: "derived_text" + size: 10 + min_doc_count: 1 + +--- +"Test scripted_metric aggregation": +- do: + catch: /A document doesn't have a value for a field/ + search: + index: test + body: + size: 0 + 
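+ # scripted metric is listed as NOT SUPPORTED for derived fields in the setup notes above, so this request is expected to fail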
aggs: + scripted_metric_agg: + scripted_metric: + init_script: "state.arr = []" + map_script: "state.arr.add(doc.derived_long.value)" + combine_script: "return 0" + reduce_script: "return 0" + +--- +"Test geo_distance aggregation on derived_geo": +- do: + catch: /aggregation_execution_exception/ + search: + index: test + rest_total_hits_as_int: true + body: + size: 0 + aggs: + distance: + geo_distance: + field: derived_geo + origin: "35.7796, -78.6382" + ranges: + - to: 1000000 + - from: 1000000 + to: 5000000 + - from: 5000000 diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java index f0200e72c3bc2..e230e37e6d826 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -9,38 +9,50 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.network.InetAddresses; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; import org.opensearch.common.unit.Fuzziness; import org.opensearch.geometry.Geometry; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.query.DerivedFieldQuery; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.AggregationScript; import org.opensearch.script.DerivedFieldScript; import org.opensearch.script.Script; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.lookup.LeafSearchLookup; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; +import java.net.InetAddress; import java.time.ZoneId; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; /** * MappedFieldType for Derived Fields * Contains logic to execute different type of queries on a derived field of given type. 
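+ * Also wires doc value formatting, fielddata, and an {@link AggregationScript} factory so derived fields can be used in aggregations.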
+ * * @opensearch.internal */ @@ -49,6 +61,11 @@ public class DerivedFieldType extends MappedFieldType implements GeoShapeQueryab final FieldMapper typeFieldMapper; final Function indexableFieldGenerator; + @Override + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { + return typeFieldMapper.mappedFieldType.docValueFormat(format, timeZone); + } + public DerivedFieldType( DerivedField derivedField, boolean isIndexed, @@ -134,6 +151,11 @@ public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLo ); } + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + return getFieldMapper().mappedFieldType.fielddataBuilder(fullyQualifiedIndexName, searchLookup); + } + @Override public Query termQuery(Object value, QueryShardContext context) { Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); @@ -503,7 +525,7 @@ public Query existsQuery(QueryShardContext context) { @Override public boolean isAggregatable() { - return false; + return true; } private Query createConjuctionQuery(Query filterQuery, DerivedFieldQuery derivedFieldQuery) { @@ -529,4 +551,55 @@ public static DerivedFieldScript.LeafFactory getDerivedFieldLeafFactory( DerivedFieldScript.Factory factory = context.compile(script, DerivedFieldScript.CONTEXT); return factory.newFactory(script.getParams(), searchLookup); } + + public AggregationScript.LeafFactory getAggregationScript(QueryShardContext context) { + return new AggregationScript.LeafFactory() { + @Override + public AggregationScript newInstance(LeafReaderContext ctx) throws IOException { + final DerivedFieldValueFetcher derivedFieldValueFetcher = valueFetcher(context, context.lookup(), null); + derivedFieldValueFetcher.setNextReader(ctx); + final LeafSearchLookup leafSearchLookup = context.lookup().getLeafSearchLookup(ctx); + + return new AggregationScript(derivedField.getScript().getParams(), context.lookup(), ctx) { + @Override + public Object execute() { + return formatValues(derivedFieldValueFetcher.fetchValuesInternal(leafSearchLookup.source())); + } + + @Override + public void setDocument(int docid) { + super.setDocument(docid); + leafSearchLookup.source().setSegmentAndDocument(ctx, docid); + } + }; + } + + @Override + public boolean needs_score() { + return false; + } + }; + } + + // perform any formatting on the returned Object before passing to + // any values source. + private Object formatValues(List objects) { + // ips are returned as raw strings, format them as BytesRefs + // This ensures that ip_range aggs compare the bytesRef against ranges computed in the + // same way. + if (typeFieldMapper instanceof IpFieldMapper) { + return objects.stream().map(o -> (String) o).map(this::toBytesRef).collect(Collectors.toList()); + } + return objects; + } + + // format the ip string as BytesRef. 
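+ // InetAddressPoint.encode returns the 16-byte IPv6 form (IPv4 addresses are
+ // mapped into it), so IPv4 and IPv6 values sort and compare consistently.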
+ private BytesRef toBytesRef(String ip) { + if (ip == null) { + return null; + } + InetAddress address = InetAddresses.forString(ip); + byte[] bytes = InetAddressPoint.encode(address); + return new BytesRef(bytes); + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java index 7e5c9a3f3da93..3d0165f702fda 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectDerivedFieldType.java @@ -91,21 +91,22 @@ public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLo derivedField.getFormat() != null ? DateFormatter.forPattern(derivedField.getFormat()) : null ); - Function valueForDisplayUpdated = derivedField.getType().equals(DerivedFieldSupportedTypes.DATE.getName()) ? (o -> { + Function dateFormatter = derivedField.getType().equals(DerivedFieldSupportedTypes.DATE.getName()) ? (o -> { // this is needed to support date type for nested fields as they are required to be converted to long if (o instanceof String) { - return valueForDisplay.apply(((DateFieldMapper) typeFieldMapper).fieldType().parse((String) o)); + return ((DateFieldMapper) typeFieldMapper).fieldType().parse((String) o); } else { - return valueForDisplay.apply(o); + return o; } - }) : valueForDisplay; + }) : null; String subFieldName = name().substring(name().indexOf(".") + 1); return new ObjectDerivedFieldValueFetcher( subFieldName, getDerivedFieldLeafFactory(derivedField.getScript(), context, searchLookup == null ? context.lookup() : searchLookup), - valueForDisplayUpdated, - derivedField.getIgnoreMalformed() + valueForDisplay, + derivedField.getIgnoreMalformed(), + dateFormatter ); } @@ -115,6 +116,8 @@ static class ObjectDerivedFieldValueFetcher extends DerivedFieldValueFetcher { // TODO add it as part of index setting? private final boolean ignoreOnMalFormed; + private final Function dateFormatter; + ObjectDerivedFieldValueFetcher( String subField, DerivedFieldScript.LeafFactory derivedFieldScriptFactory, @@ -124,6 +127,20 @@ static class ObjectDerivedFieldValueFetcher extends DerivedFieldValueFetcher { super(derivedFieldScriptFactory, valueForDisplay); this.subField = subField; this.ignoreOnMalFormed = ignoreOnMalFormed; + this.dateFormatter = null; + } + + ObjectDerivedFieldValueFetcher( + String subField, + DerivedFieldScript.LeafFactory derivedFieldScriptFactory, + Function valueForDisplay, + boolean ignoreOnMalFormed, + Function dateFormatter + ) { + super(derivedFieldScriptFactory, valueForDisplay); + this.subField = subField; + this.ignoreOnMalFormed = ignoreOnMalFormed; + this.dateFormatter = dateFormatter; } @Override @@ -140,7 +157,7 @@ public List fetchValuesInternal(SourceLookup lookup) { if (nestedFieldObj instanceof List) { result.addAll((List) nestedFieldObj); } else { - result.add(nestedFieldObj); + result.add(dateFormatter != null ? 
dateFormatter.apply(nestedFieldObj) : nestedFieldObj); } } catch (OpenSearchParseException e) { if (!ignoreOnMalFormed) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java index d006b15df327c..b6c8fe5d4802c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java @@ -36,6 +36,7 @@ import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexGeoPointFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.DerivedFieldType; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.query.QueryShardContext; @@ -183,6 +184,12 @@ private static ValuesSourceConfig internalResolve( valuesSourceType = defaultValueSourceType; } DocValueFormat docValueFormat = resolveFormat(format, valuesSourceType, timeZone, fieldType); + + // If we are aggregating on derived field set the agg script. + if (fieldType instanceof DerivedFieldType) { + aggregationScript = ((DerivedFieldType) fieldType).getAggregationScript(context); + } + config = new ValuesSourceConfig( valuesSourceType, fieldContext, @@ -336,7 +343,7 @@ private ValuesSource ConstructValuesSource(Object missing, DocValueFormat format if (this.unmapped) { vs = valueSourceType().getEmpty(); } else { - if (fieldContext() == null) { + if (fieldContext() == null || fieldType() instanceof DerivedFieldType) { // Script case vs = valueSourceType().getScript(script(), scriptValueType()); } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java index 349bd8e14edf6..30f7494ea2d18 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support.values; import org.apache.lucene.search.Scorable; +import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.ScorerAware; import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.fielddata.SortedBinaryDocValues; @@ -61,7 +62,11 @@ private void set(int i, Object o) { values[i].clear(); } else { CollectionUtils.ensureNoSelfReferences(o, "ScriptBytesValues value"); - values[i].copyChars(o.toString()); + if (o instanceof BytesRef) { + values[i].copyBytes((BytesRef) o); + } else { + values[i].copyChars(o.toString()); + } } } diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java index f65acd0db0627..fe9db24f494ad 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java @@ -15,14 +15,30 @@ import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.LongField; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.util.BytesRef; import 
org.opensearch.OpenSearchException; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.AggregationScript; import org.opensearch.script.Script; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import java.io.IOException; import java.util.List; import static org.apache.lucene.index.IndexOptions.NONE; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class DerivedFieldTypeTests extends FieldTypeTestCase { @@ -100,4 +116,57 @@ public void testObjectType() { public void testUnsupportedType() { expectThrows(IllegalArgumentException.class, () -> createDerivedFieldType("match_only_text")); } + + public void testGetAggregationScript_keyword() throws IOException { + DerivedFieldType dft = spy(createDerivedFieldType("keyword")); + assertTrue(dft.isAggregatable()); + QueryShardContext mockContext = mock(QueryShardContext.class); + List expected = List.of("foo"); + mockValueFetcherForAggs(mockContext, dft, expected); + + AggregationScript.LeafFactory aggregationScript = dft.getAggregationScript(mockContext); + // have to use a memoryIndex because we can't mock leafReaderContext + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + AggregationScript script = aggregationScript.newInstance(leafReaderContext); + + Object result = script.execute(); + assertEquals(expected, result); + } + + public void testGetAggregationScript_ip() throws IOException { + DerivedFieldType dft = spy(createDerivedFieldType("ip")); + assertTrue(dft.isAggregatable()); + QueryShardContext mockContext = mock(QueryShardContext.class); + List expected = List.of("192.168.0.1"); + LeafSearchLookup leafSearchLookup = mockValueFetcherForAggs(mockContext, dft, expected); + SourceLookup sourceLookup = mock(SourceLookup.class); + when(leafSearchLookup.source()).thenReturn(sourceLookup); + AggregationScript.LeafFactory aggregationScript = dft.getAggregationScript(mockContext); + assertFalse(aggregationScript.needs_score()); + // have to use a memoryIndex because we can't mock leafReaderContext + MemoryIndex index = new MemoryIndex(); + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + AggregationScript script = aggregationScript.newInstance(leafReaderContext); + + // test setDocument + int docid = 1; + script.setDocument(docid); + verify(sourceLookup, times(1)).setSegmentAndDocument(any(), eq(docid)); + + // test execute + List result = (List) script.execute(); + assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString((String) expected.get(0)))), result.get(0)); + } + + private static LeafSearchLookup mockValueFetcherForAggs(QueryShardContext mockContext, DerivedFieldType dft, List expected) { + SearchLookup searchLookup = mock(SearchLookup.class); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(searchLookup.getLeafSearchLookup(any())).thenReturn(leafLookup); + when(mockContext.lookup()).thenReturn(searchLookup); + DerivedFieldValueFetcher valueFetcher = 
mock(DerivedFieldValueFetcher.class); + when(valueFetcher.fetchValuesInternal(any())).thenReturn(expected); + doReturn(valueFetcher).when(dft).valueFetcher(any(), any(), any()); + return leafLookup; + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java new file mode 100644 index 0000000000000..2fb65d7fe3c46 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DerivedFieldAggregationTests.java @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.terms; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.DerivedField; +import org.opensearch.index.mapper.DerivedFieldResolver; +import org.opensearch.index.mapper.DerivedFieldResolverFactory; +import org.opensearch.index.mapper.DerivedFieldType; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +public class DerivedFieldAggregationTests extends AggregatorTestCase { + + private QueryShardContext mockContext; + private List docs; + + private static final String[][] raw_requests = new String[][] { + { "40.135.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "40.135.0.0" }, + { "232.0.0.0 GET /images/hm_bg.jpg HTTP/1.0", "400", "232.0.0.0" }, + { "26.1.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "26.1.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "200", "247.37.0.0" } }; + + @Before + public void init() { + super.initValuesSourceRegistry(); + // Create a mock QueryShardContext + mockContext = mock(QueryShardContext.class); + when(mockContext.index()).thenReturn(new Index("test_index", "uuid")); + when(mockContext.allowExpensiveQueries()).thenReturn(true); + + MapperService mockMapperService = mock(MapperService.class); + 
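+ // Derived field resolution consults the index settings through the mapper service, so stub that chain on the mock context.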
when(mockContext.getMapperService()).thenReturn(mockMapperService); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + // Mock IndexSettings + IndexSettings mockIndexSettings = new IndexSettings( + IndexMetadata.builder("test_index").settings(indexSettings).build(), + Settings.EMPTY + ); + when(mockMapperService.getIndexSettings()).thenReturn(mockIndexSettings); + when(mockContext.getIndexSettings()).thenReturn(mockIndexSettings); + docs = new ArrayList<>(); + for (String[] request : raw_requests) { + Document document = new Document(); + document.add(new TextField("raw_request", request[0], Field.Store.YES)); + document.add(new KeywordField("status", request[1], Field.Store.YES)); + docs.add(document); + } + } + + public void testSimpleTermsAggregationWithDerivedField() throws IOException { + MappedFieldType keywordFieldType = new KeywordFieldMapper.KeywordFieldType("status"); + + SearchLookup searchLookup = mock(SearchLookup.class); + SourceLookup sourceLookup = new SourceLookup(); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(leafLookup.source()).thenReturn(sourceLookup); + + // Mock DerivedFieldScript.Factory + DerivedFieldScript.Factory factory = (params, lookup) -> (DerivedFieldScript.LeafFactory) ctx -> { + when(searchLookup.getLeafSearchLookup(any())).thenReturn(leafLookup); + return new DerivedFieldScript(params, lookup, ctx) { + @Override + public void execute() { + addEmittedValue(raw_requests[sourceLookup.docId()][1]); + } + + @Override + public void setDocument(int docid) { + sourceLookup.setSegmentAndDocument(ctx, docid); + } + }; + }; + + DerivedField derivedField = new DerivedField("derived_field", "keyword", new Script("")); + DerivedFieldResolver resolver = DerivedFieldResolverFactory.createResolver( + mockContext, + Collections.emptyMap(), + Collections.singletonList(derivedField), + true + ); + + // spy on the resolved type so we can mock the valuefetcher + DerivedFieldType derivedFieldType = spy((DerivedFieldType) resolver.resolve("derived_field")); + DerivedFieldScript.LeafFactory leafFactory = factory.newFactory((new Script("")).getParams(), searchLookup); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory, null); + doReturn(valueFetcher).when(derivedFieldType).valueFetcher(any(), any(), any()); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("derived_terms").field("status").size(10); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + for (Document d : docs) { + iw.addDocument(d); + } + }, (InternalTerms result) -> { + assertEquals(2, result.getBuckets().size()); + List buckets = result.getBuckets(); + assertEquals("200", buckets.get(0).getKey()); + assertEquals(3, buckets.get(0).getDocCount()); + assertEquals("400", buckets.get(1).getKey()); + assertEquals(3, buckets.get(1).getDocCount()); + }, keywordFieldType, derivedFieldType); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java index 33d9a63f61a35..568c3c950f588 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -37,9 +37,12 @@ import 
org.apache.lucene.util.BytesRef; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.fielddata.SortedBinaryDocValues; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -334,4 +337,40 @@ public void testFieldAlias() throws Exception { assertEquals(new BytesRef("value"), values.nextValue()); } } + + public void testDerivedField() throws Exception { + String script = "derived_field_script"; + String derived_field = "derived_keyword"; + + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("derived") + .startObject(derived_field) + .field("type", "keyword") + .startObject("script") + .field("source", script) + .field("lang", "mockscript") + .endObject() + .endObject() + .endObject() + .endObject(); + IndexService indexService = createIndex("index", Settings.EMPTY, MapperService.SINGLE_MAPPING_NAME, mapping); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); + ValuesSourceConfig config = ValuesSourceConfig.resolve( + context, + null, + derived_field, + null, + null, + null, + null, + CoreValuesSourceType.BYTES + ); + assertNotNull(script); + assertEquals(ValuesSource.Bytes.Script.class, config.getValuesSource().getClass()); + } + } } From 6dbb079ba85ad14e261f0a402a118992482a848e Mon Sep 17 00:00:00 2001 From: David Zane <38449481+dzane17@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:16:39 -0700 Subject: [PATCH 27/68] Remove mmap.extensions setting (#9392) * Remove mmap.extensions setting in favor of nio.extensions Signed-off-by: David Zane * Update CHANGELOG-3.0.md Co-authored-by: Andriy Redko Signed-off-by: David Zane <38449481+dzane17@users.noreply.github.com> --------- Signed-off-by: David Zane Signed-off-by: David Zane <38449481+dzane17@users.noreply.github.com> Co-authored-by: Andriy Redko --- CHANGELOG-3.0.md | 1 + .../common/settings/IndexScopedSettings.java | 1 - .../org/opensearch/index/IndexModule.java | 77 +----------------- .../index/store/FsDirectoryFactory.java | 19 +---- .../index/store/FsDirectoryFactoryTests.java | 79 +------------------ 5 files changed, 5 insertions(+), 172 deletions(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 06b761b1df8bd..48d978bede420 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -43,6 +43,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) +- Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid 
protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 6e7d77d0c00d4..a4d60bc76127c 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -191,7 +191,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, - IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS, IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS, IndexModule.INDEX_RECOVERY_TYPE_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 93ff1b78b1ac5..eab070e1c6c10 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -97,7 +97,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -183,52 +182,7 @@ public final class IndexModule { Property.PrivateIndex ); - /** Which lucene file extensions to load with the mmap directory when using hybridfs store. This settings is ignored if {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} is set. - * This is an expert setting. - * @see Lucene File Extensions. - * - * @deprecated This setting will be removed in OpenSearch 3.x. Use {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} instead. - */ - @Deprecated - public static final Setting> INDEX_STORE_HYBRID_MMAP_EXTENSIONS = Setting.listSetting( - "index.store.hybrid.mmap.extensions", - List.of("nvd", "dvd", "tim", "tip", "dim", "kdd", "kdi", "cfs", "doc"), - Function.identity(), - new Setting.Validator>() { - - @Override - public void validate(final List value) {} - - @Override - public void validate(final List value, final Map, Object> settings) { - if (value.equals(INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY)) == false) { - final List nioExtensions = (List) settings.get(INDEX_STORE_HYBRID_NIO_EXTENSIONS); - final List defaultNioExtensions = INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY); - if (nioExtensions.equals(defaultNioExtensions) == false) { - throw new IllegalArgumentException( - "Settings " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " & " - + INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey() - + " cannot both be set. Use " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " only." - ); - } - } - } - - @Override - public Iterator> settings() { - return List.>of(INDEX_STORE_HYBRID_NIO_EXTENSIONS).iterator(); - } - }, - Property.IndexScope, - Property.NodeScope, - Property.Deprecated - ); - - /** Which lucene file extensions to load with nio. All others will default to mmap. Takes precedence over {@link #INDEX_STORE_HYBRID_MMAP_EXTENSIONS}. + /** Which lucene file extensions to load with nio. All others will default to mmap. * This is an expert setting. * @see Lucene File Extensions. 
*/ @@ -253,35 +207,6 @@ public Iterator> settings() { "vem" ), Function.identity(), - new Setting.Validator>() { - - @Override - public void validate(final List value) {} - - @Override - public void validate(final List value, final Map, Object> settings) { - if (value.equals(INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY)) == false) { - final List mmapExtensions = (List) settings.get(INDEX_STORE_HYBRID_MMAP_EXTENSIONS); - final List defaultMmapExtensions = INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY); - if (mmapExtensions.equals(defaultMmapExtensions) == false) { - throw new IllegalArgumentException( - "Settings " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " & " - + INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey() - + " cannot both be set. Use " - + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() - + " only." - ); - } - } - } - - @Override - public Iterator> settings() { - return List.>of(INDEX_STORE_HYBRID_MMAP_EXTENSIONS).iterator(); - } - }, Property.IndexScope, Property.NodeScope ); diff --git a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java index a46b641d1423f..c963f8aa95b8d 100644 --- a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java @@ -45,7 +45,6 @@ import org.apache.lucene.store.SimpleFSLockFactory; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -57,8 +56,6 @@ import java.nio.file.Path; import java.util.HashSet; import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Factory for a filesystem directory @@ -100,21 +97,7 @@ protected Directory newFSDirectory(Path location, LockFactory lockFactory, Index case HYBRIDFS: // Use Lucene defaults final FSDirectory primaryDirectory = FSDirectory.open(location, lockFactory); - final Set nioExtensions; - final Set mmapExtensions = Set.copyOf(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS)); - if (mmapExtensions.equals( - new HashSet(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY)) - ) == false) { - // If the mmap extension setting was defined, then compute nio extensions by subtracting out the - // mmap extensions from the set of all extensions. 
- nioExtensions = Stream.concat( - IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY).stream(), - IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY).stream() - ).filter(e -> mmapExtensions.contains(e) == false).collect(Collectors.toUnmodifiableSet()); - } else { - // Otherwise, get the list of nio extensions from the nio setting - nioExtensions = Set.copyOf(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS)); - } + final Set nioExtensions = new HashSet<>(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS)); if (primaryDirectory instanceof MMapDirectory) { MMapDirectory mMapDirectory = (MMapDirectory) primaryDirectory; return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions), nioExtensions); diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index 2fffebbcf5f1f..95113b7eeb370 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -96,7 +96,7 @@ public void testPreload() throws IOException { build = Settings.builder() .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "tip", "dim", "kdd", "kdi", "cfs", "doc") + .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "tip", "dim", "kdd", "kdi", "cfs", "doc", "new") .build(); try (Directory directory = newDirectory(build)) { assertTrue(FsDirectoryFactory.isHybridFs(directory)); @@ -108,7 +108,7 @@ public void testPreload() throws IOException { assertTrue(hybridDirectory.useDelegate("foo.tim")); assertTrue(hybridDirectory.useDelegate("foo.pos")); assertTrue(hybridDirectory.useDelegate("foo.pay")); - assertTrue(hybridDirectory.useDelegate("foo.new")); + assertFalse(hybridDirectory.useDelegate("foo.new")); assertFalse(hybridDirectory.useDelegate("foo.tip")); assertFalse(hybridDirectory.useDelegate("foo.dim")); assertFalse(hybridDirectory.useDelegate("foo.kdd")); @@ -123,63 +123,6 @@ public void testPreload() throws IOException { assertTrue(preLoadMMapDirectory.useDelegate("foo.cfs")); assertTrue(preLoadMMapDirectory.useDelegate("foo.nvd")); } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .build(); - try (Directory directory = newDirectory(build)) { - assertTrue(FsDirectoryFactory.isHybridFs(directory)); - FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - // test custom hybrid mmap extensions - // true->mmap, false->nio - assertTrue(hybridDirectory.useDelegate("foo.nvd")); - assertTrue(hybridDirectory.useDelegate("foo.dvd")); - assertTrue(hybridDirectory.useDelegate("foo.tim")); - assertTrue(hybridDirectory.useDelegate("foo.pos")); - assertTrue(hybridDirectory.useDelegate("foo.new")); - assertFalse(hybridDirectory.useDelegate("foo.pay")); - assertFalse(hybridDirectory.useDelegate("foo.tip")); - assertFalse(hybridDirectory.useDelegate("foo.dim")); - 
assertFalse(hybridDirectory.useDelegate("foo.kdd")); - assertFalse(hybridDirectory.useDelegate("foo.kdi")); - assertFalse(hybridDirectory.useDelegate("foo.cfs")); - assertFalse(hybridDirectory.useDelegate("foo.doc")); - MMapDirectory delegate = hybridDirectory.getDelegate(); - assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); - assertWarnings( - "[index.store.hybrid.mmap.extensions] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version." - ); - } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .build(); - try { - newDirectory(build); - } catch (final Exception e) { - assertEquals( - "Settings index.store.hybrid.nio.extensions & index.store.hybrid.mmap.extensions cannot both be set. Use index.store.hybrid.nio.extensions only.", - e.getMessage() - ); - } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos") - .build(); - try { - newDirectory(build); - } catch (final Exception e) { - assertEquals( - "Settings index.store.hybrid.nio.extensions & index.store.hybrid.mmap.extensions cannot both be set. 
Use index.store.hybrid.nio.extensions only.", e.getMessage() ); } build = Settings.builder() .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") @@ -198,24 +141,6 @@ public void testPreload() throws IOException { MMapDirectory delegate = hybridDirectory.getDelegate(); assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); } - build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey()) - .build(); - try (Directory directory = newDirectory(build)) { - assertTrue(FsDirectoryFactory.isHybridFs(directory)); - FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - // test custom hybrid mmap extensions - // true->mmap, false->nio - assertTrue(hybridDirectory.useDelegate("foo.new")); - assertFalse(hybridDirectory.useDelegate("foo.nvd")); - assertFalse(hybridDirectory.useDelegate("foo.dvd")); - assertFalse(hybridDirectory.useDelegate("foo.cfs")); - assertFalse(hybridDirectory.useDelegate("foo.doc")); - MMapDirectory delegate = hybridDirectory.getDelegate(); - assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); - } } private Directory newDirectory(Settings settings) throws IOException { From 0cde7baf438e6ab994114b71dd9fadcacac4e443 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Mon, 29 Jul 2024 17:08:54 -0700 Subject: [PATCH 28/68] Fix derived field tests for percentile ranks. (#15015) These tests fail to backport to 2.x because 2.x uses a different branch of tdigest that computes percentiles differently. Rather than chase these over time, change the assertions to check for the length of results returned instead of their values.
Signed-off-by: Marc Handalian --- .../derived_fields/60_derived_field_aggs.yml | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml index ba879a5fd73c3..87c260ce5f308 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/60_derived_field_aggs.yml @@ -413,9 +413,7 @@ setup: percents: [ 25, 50, 75 ] - match: { hits.total.value: 5 } -- match: { aggregations.double_percentiles.values.25\.0: 1.0 } -- match: { aggregations.double_percentiles.values.50\.0: 2.0 } -- match: { aggregations.double_percentiles.values.75\.0: 4.0 } +- length: { aggregations.double_percentiles.values: 3} --- "Test percentile ranks aggregation on derived_long": @@ -431,8 +429,7 @@ setup: values: [ 2, 4 ] - match: { hits.total.value: 5 } -- match: { aggregations.long_percentile_ranks.values.2\.0: 50.0 } -- match: { aggregations.long_percentile_ranks.values.4\.0: 70.0 } +- length: { aggregations.long_percentile_ranks.values: 2} --- "Test top hits aggregation on derived_keyword": @@ -1071,9 +1068,7 @@ setup: percents: [ 25, 50, 75 ] - match: { hits.total.value: 5 } -- match: { aggregations.double_percentiles.values.25\.0: 1.0 } -- match: { aggregations.double_percentiles.values.50\.0: 2.0 } -- match: { aggregations.double_percentiles.values.75\.0: 4.0 } +- length: { aggregations.double_percentiles.values: 3} --- "Test percentile ranks aggregation on derived_object.long": @@ -1089,8 +1084,7 @@ setup: values: [ 2, 4 ] - match: { hits.total.value: 5 } -- match: { aggregations.long_percentile_ranks.values.2\.0: 50.0 } -- match: { aggregations.long_percentile_ranks.values.4\.0: 70.0 } +- length: { aggregations.long_percentile_ranks.values: 2} --- "Test top hits aggregation on derived_object.keyword": From 03b1306b3cf2f4a37634ea6aca89512803541de6 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 30 Jul 2024 19:45:15 +0800 Subject: [PATCH 29/68] Fix version check in yml test for the bug fix of constant_keyword field type not working (#15019) Signed-off-by: Gao Binlong --- .../rest-api-spec/test/index/110_constant_keyword.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml index 9864bfbbb26e9..f4f8b3752bec8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml @@ -8,8 +8,8 @@ "Mappings and Supported queries": - skip: - version: " - 2.99.99" - reason: "fixed in 3.0.0" + version: " - 2.15.99" + reason: "fixed in 2.16.0" # Create index with constant_keyword field type - do: From ffa67f9ad7b00739d7471166ba1f2cc5ec1ecbf5 Mon Sep 17 00:00:00 2001 From: panguixin Date: Tue, 30 Jul 2024 23:24:07 +0800 Subject: [PATCH 30/68] Fix missing value of FieldSort for unsigned_long (#14963) * Fix missing value of FieldSort for unsigned_long Signed-off-by: panguixin * add changelog Signed-off-by: panguixin * apply review comments Signed-off-by: panguixin --------- Signed-off-by: panguixin --- 
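For orientation before the diff: with this fix, a custom `missing` value supplied for a sort on an unsigned_long field is validated against the unsigned range instead of being used as-is. A minimal sketch in the style of the integration test below (index and field names are hypothetical):

    // Accepted: the missing value fits in the unsigned long range.
    client().prepareSearch("test")
        .setQuery(matchAllQuery())
        .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(2))
        .get();

    // Rejected after this change: a negative value cannot represent an unsigned long,
    // so the request fails with 400 Bad Request:
    // "Value [-1] is out of range for an unsigned long".
    client().prepareSearch("test")
        .setQuery(matchAllQuery())
        .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(-1))
        .get();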
CHANGELOG.md | 1 + .../opensearch/search/sort/FieldSortIT.java | 46 ++++++++++++++++++- .../UnsignedLongValuesComparatorSource.java | 8 +++- 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36cd33cc40453..f619b6b85c649 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index e40928f15e8a8..fdb12639c65be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -42,6 +42,7 @@ import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; @@ -90,6 +91,7 @@ import static org.opensearch.script.MockScriptPlugin.NAME; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -919,7 +921,7 @@ public void testSortMissingNumbers() throws Exception { client().prepareIndex("test") .setId("3") .setSource( - jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 2).endObject() + jsonBuilder().startObject().field("id", "3").field("i_value", 2).field("d_value", 2.2).field("u_value", 3).endObject() ) .get(); @@ -964,6 +966,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing(randomBoolean() ? 
1 : "1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // FLOAT logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1001,6 +1015,18 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("d_value").order(SortOrder.ASC).missing(randomBoolean() ? 1.1 : "1.1")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + // UNSIGNED_LONG logger.info("--> sort with no missing (same as missing _last)"); searchResponse = client().prepareSearch() @@ -1037,6 +1063,24 @@ public void testSortMissingNumbers() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with custom missing value"); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? 2 : "2")) + .get(); + assertNoFailures(searchResponse); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + + logger.info("--> sort with negative missing value"); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("u_value").order(SortOrder.ASC).missing(randomBoolean() ? -1 : "-1")); + assertFailures(searchRequestBuilder, RestStatus.BAD_REQUEST, containsString("Value [-1] is out of range for an unsigned long")); } public void testSortMissingNumbersMinMax() throws Exception { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index 3714561b63e44..9db5817450cd0 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -81,9 +81,13 @@ public Object missingObject(Object missingValue, boolean reversed) { return min ? 
Numbers.MIN_UNSIGNED_LONG_VALUE : Numbers.MAX_UNSIGNED_LONG_VALUE; } else { if (missingValue instanceof Number) { - return ((Number) missingValue); + return Numbers.toUnsignedLongExact((Number) missingValue); } else { - return new BigInteger(missingValue.toString()); + BigInteger missing = new BigInteger(missingValue.toString()); + if (missing.signum() < 0) { + throw new IllegalArgumentException("Value [" + missingValue + "] is out of range for an unsigned long"); + } + return missing; } } } From 09276b372269b48187976ddc2c39bb95dd862544 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 30 Jul 2024 23:15:34 +0530 Subject: [PATCH 31/68] Add lower limit for primary and replica batch allocators timeout (#14979) * Add lower limit for primary and replica batch allocators Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../gateway/RecoveryFromGatewayIT.java | 6 +- .../gateway/ShardsBatchGatewayAllocator.java | 39 ++++++++++++- .../gateway/GatewayAllocatorTests.java | 55 +++++++++++++++++++ 4 files changed, 96 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f619b6b85c649..a5a3e9c60b664 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) ### Changed +- Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index eccc903dfac82..bcf23a37c0010 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -886,7 +886,7 @@ public void testBatchModeEnabledWithSufficientTimeoutAndClusterGreen() throws Ex assertEquals(0, gatewayAllocator.getNumberOfInFlightFetches()); } - public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws Exception { + public void testBatchModeEnabledWithDisabledTimeoutAndClusterGreen() throws Exception { internalCluster().startClusterManagerOnlyNodes( 1, @@ -920,8 +920,8 @@ public void testBatchModeEnabledWithInSufficientTimeoutButClusterGreen() throws .put("node.name", clusterManagerName) .put(clusterManagerDataPathSettings) .put(ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE.getKey(), 5) - .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms") - .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "10ms") + .put(ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1") + .put(ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey(), "-1") .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true) .build() ); diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 673ed8dbaa1c3..6c6b1126a78d6 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -73,13 +73,14 @@ public class ShardsBatchGatewayAllocator implements 
ExistingShardsAllocator { private final long maxBatchSize; private static final short DEFAULT_SHARD_BATCH_SIZE = 2000; - private static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + public static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = "cluster.routing.allocation.shards_batch_gateway_allocator.primary_allocator_timeout"; - private static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = + public static final String REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = "cluster.routing.allocation.shards_batch_gateway_allocator.replica_allocator_timeout"; private TimeValue primaryShardsBatchGatewayAllocatorTimeout; private TimeValue replicaShardsBatchGatewayAllocatorTimeout; + public static final TimeValue MIN_ALLOCATOR_TIMEOUT = TimeValue.timeValueSeconds(20); /** * Number of shards we send in one batch to data nodes for fetching metadata @@ -92,16 +93,50 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { Setting.Property.NodeScope ); + /** + * Timeout for existing primary shards batch allocator. + * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ public static final Setting PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be more than 20s or -1ms to disable timeout" + ); + } + } + }, Setting.Property.NodeScope, Setting.Property.Dynamic ); + /** + * Timeout for existing replica shards batch allocator. 
+ * Timeout value must be greater than or equal to 20s or -1ms to effectively disable timeout + */ public static final Setting REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING = Setting.timeSetting( REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, TimeValue.MINUS_ONE, + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue timeValue) { + if (timeValue.compareTo(MIN_ALLOCATOR_TIMEOUT) < 0 && timeValue.compareTo(TimeValue.MINUS_ONE) != 0) { + throw new IllegalArgumentException( + "Setting [" + + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + + "] should be more than 20s or -1ms to disable timeout" + ); + } + } + }, Setting.Property.NodeScope, Setting.Property.Dynamic ); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index bd56123f6df1f..1596a0b566b28 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -47,6 +47,11 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING; +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY; + public class GatewayAllocatorTests extends OpenSearchAllocationTestCase { private final Logger logger = LogManager.getLogger(GatewayAllocatorTests.class); @@ -368,6 +373,56 @@ public void testCreatePrimaryAndReplicaExecutorOfSizeTwo() { assertEquals(executor.getTimeoutAwareRunnables().size(), 2); } + public void testPrimaryAllocatorTimeout() { + // Valid setting with timeout = 20s + Settings build = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "20s").build(); + assertEquals(20, PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Valid setting with timeout > 20s + build = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "30000ms").build(); + assertEquals(30, PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Invalid setting with timeout < 20s + Settings lessThan20sSetting = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "10s").build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(lessThan20sSetting) + ); + assertEquals( + "Setting [" + PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + "] should be more than 20s or -1ms to disable timeout", + iae.getMessage() + ); + + // Valid setting with timeout = -1 + build = Settings.builder().put(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "-1").build(); + assertEquals(-1, PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getMillis()); + } + + public void testReplicaAllocatorTimeout() { + // Valid setting with timeout = 20s + Settings build = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "20s").build(); + assertEquals(20, REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Valid setting with timeout > 20s + build = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "30000ms").build(); + assertEquals(30, 
REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getSeconds()); + + // Invalid setting with timeout < 20s + Settings lessThan20sSetting = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "10s").build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(lessThan20sSetting) + ); + assertEquals( + "Setting [" + REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.getKey() + "] should be more than 20s or -1ms to disable timeout", + iae.getMessage() + ); + + // Valid setting with timeout = -1 + build = Settings.builder().put(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY, "-1").build(); + assertEquals(-1, REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getMillis()); + } + private void createIndexAndUpdateClusterState(int count, int numberOfShards, int numberOfReplicas) { if (count == 0) return; Metadata.Builder metadata = Metadata.builder(); From f977f196c48570f3b16f305b39becae6838e7271 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 30 Jul 2024 12:31:33 -0700 Subject: [PATCH 32/68] Fix test RestStatusTests.testStatusReturnsFailureStatusWhenFailuresExist (#15011) This test has a reproducible failure when the highest "failure" status is 100 level. This happens because RestStatus.status treats these as OK. Signed-off-by: Marc Handalian --- .../src/test/java/org/opensearch/core/RestStatusTests.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/core/RestStatusTests.java b/server/src/test/java/org/opensearch/core/RestStatusTests.java index f8dba99aa8b60..fbd238bd035d0 100644 --- a/server/src/test/java/org/opensearch/core/RestStatusTests.java +++ b/server/src/test/java/org/opensearch/core/RestStatusTests.java @@ -55,7 +55,11 @@ public void testStatusReturnsFailureStatusWhenFailuresExist() { heapOfFailures.add(failure); } - assertEquals(heapOfFailures.peek().status(), RestStatus.status(successfulShards, totalShards, failures)); + final RestStatus status = heapOfFailures.peek().status(); + // RestStatus.status will return RestStatus.OK when the highest failure code is 100 level. + final RestStatus expected = status.getStatusFamilyCode() == 1 ? 
RestStatus.OK : status; + + assertEquals(expected, RestStatus.status(successfulShards, totalShards, failures)); } public void testSerialization() throws IOException { From eb306d2bab43de789b59adc01265c683a8fb69fb Mon Sep 17 00:00:00 2001 From: Kaushal Kumar Date: Tue, 30 Jul 2024 15:26:44 -0700 Subject: [PATCH 33/68] Add queryGroupId to search workload tasks at co-ordinator and data node level (#14708) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add logic to add headers to Task Signed-off-by: Kaushal Kumar * add logic to add queryGroupId to task headers Signed-off-by: Kaushal Kumar * remove redundant code Signed-off-by: Kaushal Kumar * add changelog entry Signed-off-by: Kaushal Kumar * address comments Signed-off-by: Kaushal Kumar * fix precommit Signed-off-by: Kaushal Kumar * Add UTs for RemoteIndexMetadataManager (#14660) Signed-off-by: Shivansh Arora Co-authored-by: Arpit-Bandejiya Signed-off-by: Kaushal Kumar * Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes (#10959) * Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes Signed-off-by: Gao Binlong * Add more test Signed-off-by: Gao Binlong * modify change log Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Change the indexAnalyzer used by prefix field Signed-off-by: Gao Binlong * Skip old version for yaml test Signed-off-by: Gao Binlong * Optimize some code Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Modify yaml test description Signed-off-by: Gao Binlong * Remove the name parameter for setAnalyzer() Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Signed-off-by: Kaushal Kumar * Offline calculation of total shard per node and caching it for weight calculation inside LocalShardBalancer (#14675) Signed-off-by: RS146BIJAY Signed-off-by: Kaushal Kumar * [bug fix] validate lower bound for top n size (#14587) Signed-off-by: Chenyang Ji Signed-off-by: Kaushal Kumar * Create SystemIndexRegistry with helper method matchesSystemIndex (#14415) * Create new extension point in SystemIndexPlugin for a single plugin to get registered system indices Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * WIP on system indices from IndexNameExpressionResolver Signed-off-by: Craig Perkins * Add test in IndexNameExpressionResolverTests Signed-off-by: Craig Perkins * Remove changes in SystemIndexPlugin Signed-off-by: Craig Perkins * Add method in IndexNameExpressionResolver to get matching system indices Signed-off-by: Craig Perkins * Show how resolver can be chained to get system indices Signed-off-by: Craig Perkins * Fix forbiddenApis check Signed-off-by: Craig Perkins * Update CHANGELOG Signed-off-by: Craig Perkins * Make SystemIndices internal Signed-off-by: Craig Perkins * Remove unneeded changes Signed-off-by: Craig Perkins * Fix CI failures Signed-off-by: Craig Perkins * Fix precommit errors Signed-off-by: Craig Perkins * Use Regex instead of WildcardMatcher Signed-off-by: Craig Perkins * Address code review feedback Signed-off-by: Craig Perkins * Allow caller to pass index expressions Signed-off-by: Craig Perkins * Create SystemIndexRegistry Signed-off-by: Craig Perkins * Update CHANGELOG Signed-off-by: Craig Perkins * Remove singleton limitation Signed-off-by: Craig Perkins * Add javadoc Signed-off-by: Craig Perkins * Add @ExperimentalApi annotation Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins 
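As a usage sketch of the SystemIndexRegistry helper introduced in the change above — the method shape is paraphrased from the commit notes and may differ from the final API, and the index pattern here is hypothetical:

    // Ask the registry which of the given expressions match a registered system
    // index pattern; a non-empty result means system indices are involved.
    Set<String> matching = SystemIndexRegistry.matchesSystemIndexPattern(Set.of(".my-plugin-index*"));
    if (matching.isEmpty() == false) {
        // apply system-index handling to the matched expressions
    }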
Signed-off-by: Kaushal Kumar * Refactor Grok validate pattern to iterative approach (#14206) * grok validate patterns recursion to iterative Signed-off-by: Sandesh Kumar * Add max depth in resolving a pattern to avoid OOM Signed-off-by: Sandesh Kumar * change path from deque to arraylist Signed-off-by: Sandesh Kumar * rename queue to stack Signed-off-by: Sandesh Kumar * Change max depth to 500 Signed-off-by: Sandesh Kumar * typo originPatternName fix Signed-off-by: Sandesh Kumar * spotless Signed-off-by: Sandesh Kumar --------- Signed-off-by: Sandesh Kumar Signed-off-by: Kaushal Kumar * Bump opentelemetry from 1.39.0 to 1.40.0 (#14674) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Bump jackson from 2.17.1 to 2.17.2 (#14687) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Add release notes for release 1.3.18 (#14699) Signed-off-by: Zelin Hao Signed-off-by: Kaushal Kumar * Bump reactor from 3.5.19 to 3.5.20 (#14697) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Add unit tests for read flow of RemoteClusterStateService and bug fix for transient settings (#14476) Signed-off-by: Shivansh Arora Signed-off-by: Kaushal Kumar * Update version check for the bug fix of match_phrase_prefix_query not working on text field with multiple values and index_prefixes (#14703) Signed-off-by: Gao Binlong Signed-off-by: Kaushal Kumar * Remove unnecessary cast to int from test (#14696) Signed-off-by: Lukáš Vlček Signed-off-by: Kaushal Kumar * print reason why parent task was cancelled (#14604) Signed-off-by: kkewwei Signed-off-by: Kaushal Kumar * Use set of shard routing for shard in unassigned shard batch check. (#14533) Signed-off-by: Swetha Guptha Signed-off-by: Kaushal Kumar * Add versioning for UploadedIndexMetadata (#14677) * Add versioning for UploadedIndexMetadata * Handle componentPrefix for backward compatibility Signed-off-by: Sooraj Sinha Signed-off-by: Kaushal Kumar * Fix: update help output for _cat (#14722) * fixed help output for _cat Signed-off-by: ahmedsobeh * updated changelog Signed-off-by: ahmedsobeh * updated changelog Signed-off-by: ahmedsobeh --------- Signed-off-by: ahmedsobeh Signed-off-by: Kaushal Kumar * Fix hdfs-fixture kerb-admin & hadoop-minicluster dependencies are not being updated / false positive reports on CVEs (#14729) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Update to Gradle 8.9 (#14574) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Fix hdfs-fixture hadoop-minicluster dependencies are not being updated / false positive reports on CVEs (#14732) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Add `strict_allow_templates` dynamic mapping option (#14555) * The dynamic mapping parameter supports strict_allow_templates Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Modify skip version in yml test file Signed-off-by: Gao Binlong * Refactor some code Signed-off-by: Gao Binlong * Keep the old methods Signed-off-by: Gao Binlong * change public to private Signed-off-by: Gao Binlong * Optimize some code Signed-off-by: Gao Binlong * Do not override toString method for Dynamic Signed-off-by: Gao Binlong * Optimize some code and modify the changelog Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Signed-off-by: Kaushal Kumar * Bump net.minidev:json-smart from 2.5.0 to 2.5.1 in /plugins/repository-azure (#14748) * Bump net.minidev:json-smart in /plugins/repository-azure Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.0 to 2.5.1.
- [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.0...2.5.1) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * remove query insights plugin from core (#14743) Signed-off-by: Chenyang Ji Signed-off-by: Kaushal Kumar * Add `strict_allow_templates` dynamic mapping option (#14555) (#14737) (#14742) * The dynamic mapping parameter supports strict_allow_templates * Modify change log * Modify skip version in yml test file * Refactor some code * Keep the old methods * change public to private * Optimize some code * Do not override toString method for Dynamic * Optimize some code and modify the changelog --------- (cherry picked from commit 6b8b3efe01a62c221f308a2e3b019d75a7f5ad8a) Signed-off-by: Gao Binlong Signed-off-by: github-actions[bot] Signed-off-by: Andriy Redko Co-authored-by: opensearch-trigger-bot[bot] <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] Signed-off-by: Kaushal Kumar * Fix create or update alias API doesn't throw exception for unsupported parameters (#14719) * Fix create or update alias API doesn't throw exception for unsupported parameters Signed-off-by: Gao Binlong * Update version check in yml test Signed-off-by: Gao Binlong * modify change log Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Signed-off-by: Kaushal Kumar * Remove query categorization from core (#14759) * Remove query categorization from core Signed-off-by: Siddhant Deshmukh * Add changelog Signed-off-by: Siddhant Deshmukh * Trigger Build Signed-off-by: Siddhant Deshmukh --------- Signed-off-by: Siddhant Deshmukh Signed-off-by: Kaushal Kumar * Add changes to propagate queryGroupId across child requests and nodes (#14614) * add query group header propagator Signed-off-by: Kaushal Kumar * apply spotless check Signed-off-by: Kaushal Kumar * add new propagator in ThreadContext Signed-off-by: Kaushal Kumar * spotlessApply Signed-off-by: Kaushal Kumar * address comments Signed-off-by: Kaushal Kumar * Bump com.microsoft.azure:msal4j from 1.15.1 to 1.16.0 in /plugins/repository-azure (#14610) * Bump com.microsoft.azure:msal4j in /plugins/repository-azure Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.15.1 to 1.16.0. - [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases) - [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt) - [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/compare/v1.15.1...v1.16.0) --- updated-dependencies: - dependency-name: com.microsoft.azure:msal4j dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * [Bugfix] Fix ICacheKeySerializerTests flakiness (#14564) * Fix testInvalidInput flakiness Signed-off-by: Peter Alfonsi * Addressed andrross's comment Signed-off-by: Peter Alfonsi * rerun security check Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi Signed-off-by: Kaushal Kumar * Correct typo in method name (#14621) Signed-off-by: vatsal Signed-off-by: Kaushal Kumar * Refactoring FilterPath.parse by using an iterative approach instead of recursion. (#14200) * Refactor FilterPath parse function (#12067) Signed-off-by: Robin Friedmann * Implement unit tests for FilterPathTests (#12067) Signed-off-by: Robin Friedmann * Write warn log if Filter is empty; Add comments (#12067) Signed-off-by: Robin Friedmann * Add changelog Signed-off-by: Siddhant Deshmukh * Remove unnecessary log statement Signed-off-by: Siddhant Deshmukh * Remove unused logger Signed-off-by: Siddhant Deshmukh * Spotless apply Signed-off-by: Siddhant Deshmukh * Remove incorrect changelog Signed-off-by: Siddhant Deshmukh --------- Signed-off-by: Siddhant Deshmukh Co-authored-by: Robin Friedmann Signed-off-by: Kaushal Kumar * Removing String format in RemoteStoreMigrationAllocationDecider to optimise performance(#14612) Signed-off-by: RS146BIJAY Signed-off-by: Kaushal Kumar * Clear templates before Adding; Use NamedWriteableAwareStreamInput for RemoteCustomMetadata; Correct the check for deciding upload of HashesOfConsistentSettings (#14513) * Clear templates before Adding; Use NamedWriteableAwareStreamInput for RemoteCustomMetadata * Correct the check for deciding upload of hashes of consistent settings Signed-off-by: Sooraj Sinha Signed-off-by: Kaushal Kumar * add changelog Signed-off-by: Kaushal Kumar * add PR link changelog Signed-off-by: Kaushal Kumar * Improve reroute performance by optimising List.removeAll in LocalShardsBalancer to filter remote search shard from relocation decision (#14613) Signed-off-by: RS146BIJAY Signed-off-by: Kaushal Kumar * Fix assertion failure while deleting remote backed index (#14601) Signed-off-by: Sachin Kale Signed-off-by: Kaushal Kumar * Allow system index warning in OpenSearchRestTestCase.refreshAllIndices (#14635) * Allow system index warning Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Address code review comments Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins Signed-off-by: Kaushal Kumar * Star tree codec changes (#14514) --------- Signed-off-by: Bharathwaj G Signed-off-by: Kaushal Kumar * Bump com.github.spullara.mustache.java:compiler from 0.9.13 to 0.9.14 in /modules/lang-mustache (#14672) * Bump com.github.spullara.mustache.java:compiler Bumps [com.github.spullara.mustache.java:compiler](https://github.com/spullara/mustache.java) from 0.9.13 to 0.9.14. - [Commits](https://github.com/spullara/mustache.java/compare/mustache.java-0.9.13...mustache.java-0.9.14) --- updated-dependencies: - dependency-name: com.github.spullara.mustache.java:compiler dependency-type: direct:production update-type: version-update:semver-patch ... 
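The queryGroupId propagation assembled in this change (see "move query group thread context propagator out of ThreadContext" just below) reduces to copying one request header into the task's headers so that child tasks spawned on other nodes inherit it. A hedged sketch — the header key comes from this patch, while the surrounding wiring is assumed:

    // ThreadContext#getHeader is the real API; the variables around it are illustrative.
    String queryGroupId = threadPool.getThreadContext().getHeader("queryGroupId");
    if (queryGroupId != null) {
        taskHeaders.put("queryGroupId", queryGroupId);
    }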
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * Bump net.minidev:accessors-smart from 2.5.0 to 2.5.1 in /plugins/repository-azure (#14673) * Bump net.minidev:accessors-smart in /plugins/repository-azure Bumps [net.minidev:accessors-smart](https://github.com/netplex/json-smart-v2) from 2.5.0 to 2.5.1. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.0...2.5.1) --- updated-dependencies: - dependency-name: net.minidev:accessors-smart dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * move query group thread context propagator out of ThreadContext Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar Signed-off-by: dependabot[bot] Signed-off-by: Peter Alfonsi Signed-off-by: vatsal Signed-off-by: Siddhant Deshmukh Signed-off-by: RS146BIJAY Signed-off-by: Sooraj Sinha Signed-off-by: Sachin Kale Signed-off-by: Craig Perkins Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Peter Alfonsi Co-authored-by: Peter Alfonsi Co-authored-by: Vatsal <36672090+imvtsl@users.noreply.github.com> Co-authored-by: Siddhant Deshmukh Co-authored-by: Robin Friedmann Co-authored-by: rishavz_sagar Co-authored-by: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Co-authored-by: Sachin Kale Co-authored-by: Craig Perkins Co-authored-by: Bharathwaj G Signed-off-by: Kaushal Kumar * Add consumers to remote store based index settings (#14764) Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Signed-off-by: Kaushal Kumar * Add matchesPluginSystemIndexPattern to SystemIndexRegistry (#14750) * Add matchesPluginSystemIndexPattern to SystemIndexRegistry Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Use single data structure to keep track of system indices Signed-off-by: Craig Perkins * Address code review comments Signed-off-by: Craig Perkins * Add test for getAllDescriptors Signed-off-by: Craig Perkins * Update server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java Co-authored-by: Andriy Redko Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins Signed-off-by: Craig Perkins Co-authored-by: Andriy Redko Signed-off-by: Kaushal Kumar * SPI for loading ABC templates (#14659) * SPI for loading ABC templates Signed-off-by: mgodwan Signed-off-by: Kaushal Kumar * Fix bulk upsert ignores the default_pipeline and final_pipeline when the auto-created index matches the index template (#12891) * Fix bulk upsert ignores the default_pipeline and final_pipeline when auto-created index matches with the index template Signed-off-by: Gao Binlong * Modify changelog & comment Signed-off-by: Gao Binlong * Use new approach Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong Signed-off-by: Kaushal Kumar * Fix flaky 
test due to node being used across all tests (#14787) Signed-off-by: Mohit Godwani Signed-off-by: Kaushal Kumar * Star Tree Implementation [OnHeap] (#14512) --------- Signed-off-by: Sarthak Aggarwal Signed-off-by: Kaushal Kumar * Add Gao Binlong as maintainer (#14796) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Clear ehcache disk cache files during initialization (#14738) * Clear ehcache disk cache files during initialization Signed-off-by: Sagar Upadhyaya * Adding UT to fix line coverage Signed-off-by: Sagar Upadhyaya * Addressing comment Signed-off-by: Sagar Upadhyaya * Adding more Uts for better line coverage Signed-off-by: Sagar Upadhyaya * Throwing exception in case we fail to clear cache files during startup Signed-off-by: Sagar Upadhyaya * Adding more UTs Signed-off-by: Sagar Upadhyaya * Adding a UT for more coverage Signed-off-by: Sagar Upadhyaya * Fixing gradle build Signed-off-by: Sagar Upadhyaya * Update ehcache disk cache close() logic Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Kaushal Kumar * Refactor remote-routing-table service inline with remote state interfaces (#14668) --------- Signed-off-by: Arpit Bandejiya Signed-off-by: Arpit-Bandejiya Signed-off-by: Kaushal Kumar * Set version to 2.15 for determining metadata during migration to remote store Signed-off-by: Sandeep Kumawat Co-authored-by: Sandeep Kumawat Signed-off-by: Kaushal Kumar * Fix bulk upsert ignores the default_pipeline and final_pipeline when the auto-created index matches the index template (#14793) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Fix create or update alias API doesn't throw exception for unsupported parameters (#14769) Signed-off-by: Andriy Redko Signed-off-by: Kaushal Kumar * Change RCSS info logs to debug (#14814) Signed-off-by: Shivansh Arora Signed-off-by: Kaushal Kumar * [Bugfix] Fix NPE in ReplicaShardAllocator (#13993) (#14385) * [Bugfix] Fix NPE in ReplicaShardAllocator (#13993) Signed-off-by: Daniil Roman * Add fix info to CHANGELOG.md Signed-off-by: Daniil Roman --------- Signed-off-by: Daniil Roman Signed-off-by: Daniil Roman Signed-off-by: Kaushal Kumar * Run performance benchmark on pull requests (#14760) * add performance benchmark workflow for pull requests Signed-off-by: Rishabh Singh * Update PERFORMANCE_BENCHMARKS.md Co-authored-by: Andriy Redko Signed-off-by: Rishabh Singh * Update PERFORMANCE_BENCHMARKS.md Co-authored-by: Andriy Redko Signed-off-by: Rishabh Singh * Update .github/workflows/benchmark-pull-request.yml Co-authored-by: Andriy Redko Signed-off-by: Rishabh Singh * Update .github/workflows/benchmark-pull-request.yml Co-authored-by: Andriy Redko Signed-off-by: Rishabh Singh * Update .github/workflows/benchmark-pull-request.yml Co-authored-by: Andriy Redko Signed-off-by: Rishabh Singh * Update .github/workflows/benchmark-pull-request.yml Co-authored-by: Andriy Redko Signed-off-by: Rishabh Singh --------- Signed-off-by: Rishabh Singh Signed-off-by: Rishabh Singh Co-authored-by: Andriy Redko Signed-off-by: Kaushal Kumar * fix constant_keyword field type (#14807) Signed-off-by: kkewwei test Signed-off-by: Daniel (dB.) Doubrovkine Co-authored-by: Daniel (dB.) 
Doubrovkine Signed-off-by: Kaushal Kumar * [Remote Store Migration] Reconcile remote store based index settings during STRICT mode switch (#14792) Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Signed-off-by: Kaushal Kumar * Add prefix mode verification setting for repository verification (#14790) * Add prefix mode verification setting for repository verification Signed-off-by: Ashish Singh * Add UTs and randomise prefix mode repository verification Signed-off-by: Ashish Singh * Incorporate PR review feedback Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh Signed-off-by: Kaushal Kumar * add length check on comment body for benchmark workflow (#14834) Signed-off-by: Rishabh Singh Signed-off-by: Kaushal Kumar * Add restore-from-snapshot test procedure for snapshot run benchmark config (#14842) Signed-off-by: Rishabh Singh Signed-off-by: Kaushal Kumar * Fix env variable name typo (#14843) Signed-off-by: Rishabh Singh Signed-off-by: Kaushal Kumar * Use circuit breaker in InternalHistogram when adding empty buckets (#14754) * introduce circuit breaker in InternalHistogram Signed-off-by: bowenlan-amzn * use circuit breaker from reduce context Signed-off-by: bowenlan-amzn * add test Signed-off-by: bowenlan-amzn * revert use_real_memory change in OpenSearchNode Signed-off-by: bowenlan-amzn * add change log Signed-off-by: bowenlan-amzn --------- Signed-off-by: bowenlan-amzn Signed-off-by: Kaushal Kumar * [Remote State] Create interface RemoteEntitiesManager (#14671) * Create interface RemoteEntitiesManager Signed-off-by: Shivansh Arora Signed-off-by: Kaushal Kumar * Optimise TransportNodesAction to not send DiscoveryNodes for NodeStat… (#14749) * Optimize TransportNodesAction to not send DiscoveryNodes for NodeStats, NodesInfo and ClusterStats call Signed-off-by: Pranshu Shukla Signed-off-by: Kaushal Kumar * Enabling term version check on local state for all ClusterManager Read Transport Actions (#14273) * enabling term version check on local state for all admin read actions Signed-off-by: Rajiv Kumar Vaidyanathan Signed-off-by: Kaushal Kumar * Reduce logging in DEBUG for MasterService:run (#14795) * Reduce logging in DEBUG for MasterService:run by introducing short and long summary in TaskBatcher Signed-off-by: Sumit Bansal Signed-off-by: Kaushal Kumar * Add SplitResponseProcessor to Search Pipelines (#14800) * Add SplitResponseProcessor for search pipelines Signed-off-by: Daniel Widdis * Register the split processor factory Signed-off-by: Daniel Widdis * Address code review comments Signed-off-by: Daniel Widdis * Avoid list copy by casting array Signed-off-by: Daniel Widdis --------- Signed-off-by: Daniel Widdis Signed-off-by: Kaushal Kumar * Add integration tests for RemoteRoutingTable Service.
(#14631) Signed-off-by: Shailendra Singh Signed-off-by: Kaushal Kumar * Add SortResponseProcessor to Search Pipelines (#14785) * Add SortResponseProcessor for search pipelines Signed-off-by: Daniel Widdis * Add stupid and unnecessary javadocs to satisfy overly strict CI Signed-off-by: Daniel Widdis * Split casting and sorting methods for readability Signed-off-by: Daniel Widdis * Register the sort processor factory Signed-off-by: Daniel Widdis * Address code review comments Signed-off-by: Daniel Widdis * Cast individual list elements to avoid creating two lists Signed-off-by: Daniel Widdis * Add yamlRestTests Signed-off-by: Daniel Widdis * Clarify why there's unusual sorting Signed-off-by: Daniel Widdis * Use instanceof instead of isAssignableFrom Signed-off-by: Daniel Widdis --------- Signed-off-by: Daniel Widdis Signed-off-by: Kaushal Kumar * Fix allowUnmappedFields, mapUnmappedFieldAsString settings to be applied when parsing query string query (#13957) * Modify to invoke QueryShardContext.fieldMapper() method to apply allowUnmappedFields and mapUnmappedFieldAsString settings Signed-off-by: imyp92 * Add test cases to verify returning 400 responses if unmapped fields are included for some types of query Signed-off-by: imyp92 * Add changelog Signed-off-by: imyp92 --------- Signed-off-by: imyp92 Signed-off-by: gaobinlong Co-authored-by: gaobinlong Signed-off-by: Kaushal Kumar * Bump com.microsoft.azure:msal4j from 1.16.0 to 1.16.1 in /plugins/repository-azure (#14857) * Bump com.microsoft.azure:msal4j in /plugins/repository-azure Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.16.0 to 1.16.1. - [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases) - [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt) - [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/compare/v1.16.0...v1.16.1) --- updated-dependencies: - dependency-name: com.microsoft.azure:msal4j dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * Bump com.gradle.develocity from 3.17.5 to 3.17.6 (#14856) * Bump com.gradle.develocity from 3.17.5 to 3.17.6 Bumps com.gradle.develocity from 3.17.5 to 3.17.6. --- updated-dependencies: - dependency-name: com.gradle.develocity dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * Bump org.jline:jline in /test/fixtures/hdfs-fixture (#14859) Bumps [org.jline:jline](https://github.com/jline/jline3) from 3.26.2 to 3.26.3. - [Release notes](https://github.com/jline/jline3/releases) - [Changelog](https://github.com/jline/jline3/blob/master/changelog.md) - [Commits](https://github.com/jline/jline3/compare/jline-parent-3.26.2...jline-parent-3.26.3) --- updated-dependencies: - dependency-name: org.jline:jline dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Kaushal Kumar * Use Lucene provided Persian stem (#14847) Lucene provided Persian stem apparently isn't hooked yet and this change is doing that based on what is done for Arabic stem support. Signed-off-by: Ebrahim Byagowi Signed-off-by: Daniel (dB.) Doubrovkine Co-authored-by: Daniel (dB.) Doubrovkine Signed-off-by: Kaushal Kumar * Bump actions/checkout from 2 to 4 (#14858) * Bump actions/checkout from 2 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Signed-off-by: Kaushal Kumar * Deprecate batch_size parameter on bulk API (#14725) By default the full _bulk payload will be passed to ingest processors as a batch, with any sub batching logic to be implemented by each processor if necessary. Signed-off-by: Liyun Xiu Signed-off-by: Kaushal Kumar * Add perms for remote snapshot cache eviction on scripted query (#14411) Signed-off-by: Finn Carroll Signed-off-by: Kaushal Kumar * add transport interceptor to populate queryGroupId in task headers Signed-off-by: Kaushal Kumar * Add rest, transport layer changes for Hot to warm tiering - dedicated setup (#13980) Signed-off-by: Neetika Singhal Signed-off-by: Kaushal Kumar * Create listener to refresh search thread resource usage (#14832) * [bug fix] fix incorrect coordinator node search resource usages Signed-off-by: Chenyang Ji * fix bug on serialization when passing task resource usage to coordinator Signed-off-by: Chenyang Ji * add more unit tests Signed-off-by: Chenyang Ji * remove query insights plugin related code Signed-off-by: Chenyang Ji * create per request listener to refresh task resource usage Signed-off-by: Chenyang Ji * Make new listener API public Signed-off-by: Siddhant Deshmukh * Add changelog Signed-off-by: Siddhant Deshmukh * Remove wrong files added Signed-off-by: Siddhant Deshmukh * Address review comments Signed-off-by: Siddhant Deshmukh * Build fix Signed-off-by: Siddhant Deshmukh * Make singleton Signed-off-by: Siddhant Deshmukh * Address review comments Signed-off-by: Siddhant Deshmukh * Make sure listener runs before plugin listeners Signed-off-by: Siddhant Deshmukh * Spotless Signed-off-by: Siddhant Deshmukh * Minor fix Signed-off-by: Siddhant Deshmukh --------- Signed-off-by: Chenyang Ji Signed-off-by: Siddhant Deshmukh Signed-off-by: Jay Deng Co-authored-by: Chenyang Ji Co-authored-by: Jay Deng Signed-off-by: Kaushal Kumar * Caching avg total bytes and avg free bytes inside ClusterInfo (#14851) Signed-off-by: RS146BIJAY Signed-off-by: Kaushal Kumar * Use default value when index.number_of_replicas is null (#14812) * Use default value when index.number_of_replicas is null Signed-off-by: Liyun Xiu * Add integration test Signed-off-by: Liyun Xiu * Add changelog Signed-off-by: Liyun Xiu --------- Signed-off-by: Liyun Xiu Signed-off-by: Kaushal Kumar * [Remote Routing Table] Implement write and 
read flow for shard diff file. (#14684) * Implement write and read flow to upload/download shard diff file. Signed-off-by: Shailendra Singh Signed-off-by: Kaushal Kumar * Optimized ClusterStatsIndices to precompute shard stats (#14426) * Optimize Cluster Stats Indices to precompute node level stats Signed-off-by: Pranshu Shukla Signed-off-by: Kaushal Kumar * Fix constraint bug which allows more primary shards than average primary shards per index (#14908) Signed-off-by: Gaurav Bafna Signed-off-by: Kaushal Kumar * Optimising AwarenessAllocationDecider for hashmap.get call (#14761) Signed-off-by: RS146BIJAY Signed-off-by: Kaushal Kumar * update comment Signed-off-by: Kaushal Kumar * Fix IngestServiceTests.testBulkRequestExecutionWithFailures (#14918) The test would previously fail if the randomness led to only a single indexing request being included in the bulk payload. This change guarantees multiple indexing requests in order to ensure the batch logic kicks in. Also replace some unneeded mocks with real classes. Signed-off-by: Andrew Ross Signed-off-by: Kaushal Kumar * add queryGroupTask Signed-off-by: Kaushal Kumar * remove unnecessary imports Signed-off-by: Kaushal Kumar * add QueryGroupTask tests Signed-off-by: Kaushal Kumar * rename WLM transport request handler Signed-off-by: Kaushal Kumar * add CHANGELOG entry Signed-off-by: Kaushal Kumar * fix ut Signed-off-by: Kaushal Kumar * address comments Signed-off-by: Kaushal Kumar * fix UT to remove the verify for final method Signed-off-by: Kaushal Kumar * apply spotless Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar Signed-off-by: Shivansh Arora Signed-off-by: Gao Binlong Signed-off-by: RS146BIJAY Signed-off-by: Chenyang Ji Signed-off-by: Craig Perkins Signed-off-by: Sandesh Kumar Signed-off-by: Andriy Redko Signed-off-by: Zelin Hao Signed-off-by: Lukáš Vlček Signed-off-by: kkewwei Signed-off-by: Swetha Guptha Signed-off-by: Sooraj Sinha Signed-off-by: ahmedsobeh Signed-off-by: dependabot[bot] Signed-off-by: github-actions[bot] Signed-off-by: Siddhant Deshmukh Signed-off-by: Peter Alfonsi Signed-off-by: vatsal Signed-off-by: Sachin Kale Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Signed-off-by: Craig Perkins Signed-off-by: mgodwan Signed-off-by: Mohit Godwani Signed-off-by: Sagar Upadhyaya Signed-off-by: Sandeep Kumawat Signed-off-by: Daniil Roman Signed-off-by: Daniil Roman Signed-off-by: Rishabh Singh Signed-off-by: Rishabh Singh Signed-off-by: Daniel (dB.)
Doubrovkine Signed-off-by: Ashish Singh Signed-off-by: bowenlan-amzn Signed-off-by: Pranshu Shukla Signed-off-by: Rajiv Kumar Vaidyanathan Signed-off-by: Sumit Bansal Signed-off-by: Daniel Widdis Signed-off-by: Shailendra Singh Signed-off-by: imyp92 Signed-off-by: gaobinlong Signed-off-by: Ebrahim Byagowi Signed-off-by: Liyun Xiu Signed-off-by: Finn Carroll Signed-off-by: Neetika Singhal Signed-off-by: Jay Deng Signed-off-by: Gaurav Bafna Signed-off-by: Andrew Ross Co-authored-by: Shivansh Arora Co-authored-by: Arpit-Bandejiya Co-authored-by: gaobinlong Co-authored-by: rishavz_sagar Co-authored-by: Chenyang Ji Co-authored-by: Craig Perkins Co-authored-by: Sandesh Kumar Co-authored-by: Andriy Redko Co-authored-by: Zelin Hao Co-authored-by: Lukáš Vlček Co-authored-by: kkewwei Co-authored-by: SwethaGuptha <156877431+SwethaGuptha@users.noreply.github.com> Co-authored-by: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Co-authored-by: Ahmed Sobeh Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: opensearch-trigger-bot[bot] <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] Co-authored-by: Siddhant Deshmukh Co-authored-by: Peter Alfonsi Co-authored-by: Peter Alfonsi Co-authored-by: Vatsal <36672090+imvtsl@users.noreply.github.com> Co-authored-by: Robin Friedmann Co-authored-by: Sachin Kale Co-authored-by: Bharathwaj G Co-authored-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Co-authored-by: Craig Perkins Co-authored-by: Andriy Redko Co-authored-by: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Co-authored-by: Sarthak Aggarwal Co-authored-by: Sagar <99425694+sgup432@users.noreply.github.com> Co-authored-by: Sandeep Kumawat <2025sandeepkumawat@gmail.com> Co-authored-by: Sandeep Kumawat Co-authored-by: Daniil Roman Co-authored-by: Rishabh Singh Co-authored-by: kkewwei Co-authored-by: Daniel (dB.) 
Doubrovkine Co-authored-by: Ashish Singh Co-authored-by: bowenlan-amzn Co-authored-by: Pranshu Shukla <55992439+Pranshu-S@users.noreply.github.com> Co-authored-by: rajiv-kv <157019998+rajiv-kv@users.noreply.github.com> Co-authored-by: Sumit Bansal Co-authored-by: Daniel Widdis Co-authored-by: shailendra0811 <167273922+shailendra0811@users.noreply.github.com> Co-authored-by: Park, Yeongwu Co-authored-by: ebraminio Co-authored-by: Liyun Xiu Co-authored-by: Finn Co-authored-by: Neetika Singhal Co-authored-by: Jay Deng Co-authored-by: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Co-authored-by: Andrew Ross --- CHANGELOG.md | 1 + .../action/search/SearchShardTask.java | 4 +- .../opensearch/action/search/SearchTask.java | 4 +- .../action/search/TransportSearchAction.java | 7 ++ .../main/java/org/opensearch/node/Node.java | 10 ++- .../org/opensearch/wlm/QueryGroupTask.java | 76 +++++++++++++++++++ ...orkloadManagementTransportInterceptor.java | 64 ++++++++++++++++ .../opensearch/wlm/QueryGroupTaskTests.java | 44 +++++++++++ ...adManagementTransportInterceptorTests.java | 40 ++++++++++ ...anagementTransportRequestHandlerTests.java | 75 ++++++++++++++++++ 10 files changed, 320 insertions(+), 5 deletions(-) create mode 100644 server/src/main/java/org/opensearch/wlm/QueryGroupTask.java create mode 100644 server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java create mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a5a3e9c60b664..a5355f010a99f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) +- [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) ### Dependencies diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index dfecf4f462c4d..ed2943db94420 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -37,8 +37,8 @@ import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -50,7 +50,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { +public class SearchShardTask extends QueryGroupTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java 
b/server/src/main/java/org/opensearch/action/search/SearchTask.java index d3c1043c50cce..2a1a961e7607b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -35,8 +35,8 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.tasks.TaskId; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.wlm.QueryGroupTask; import java.util.Map; import java.util.function.Supplier; @@ -49,7 +49,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class SearchTask extends CancellableTask implements SearchBackpressureTask { +public class SearchTask extends QueryGroupTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 7d3237d43cd5c..88bf7ebea8e52 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -101,6 +101,7 @@ import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.opensearch.wlm.QueryGroupTask; import java.util.ArrayList; import java.util.Arrays; @@ -442,6 +443,12 @@ private void executeRequest( ); searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + // At this point the QUERY_GROUP_ID header will be present in ThreadContext, either via ActionFilter + // or via HTTP header (the HTTP header will be deprecated once ActionFilter is implemented) + if (task instanceof QueryGroupTask) { + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + PipelinedRequest searchRequest; ActionListener listener; try { diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 448cb3627651c..8684b1b383cab 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -263,6 +263,7 @@ import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; import org.opensearch.watcher.ResourceWatcherService; +import org.opensearch.wlm.WorkloadManagementTransportInterceptor; import javax.net.ssl.SNIHostName; @@ -1047,6 +1048,10 @@ protected Node( admissionControlService ); + WorkloadManagementTransportInterceptor workloadManagementTransportInterceptor = new WorkloadManagementTransportInterceptor( + threadPool + ); + final Collection secureSettingsFactories = pluginsService.filterPlugins(Plugin.class) .stream() .map(p -> p.getSecureSettingFactory(settings)) @@ -1054,7 +1059,10 @@ protected Node( .map(Optional::get) .collect(Collectors.toList()); - List transportInterceptors = List.of(admissionControlTransportInterceptor); + List transportInterceptors = List.of( + admissionControlTransportInterceptor, + workloadManagementTransportInterceptor + ); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class), diff --git
a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java new file mode 100644 index 0000000000000..4eb413be61b72 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.tasks.CancellableTask; + +import java.util.Map; +import java.util.Optional; +import java.util.function.Supplier; + +import static org.opensearch.search.SearchService.NO_TIMEOUT; + +/** + * Base class to define QueryGroup tasks + */ +public class QueryGroupTask extends CancellableTask { + + private static final Logger logger = LogManager.getLogger(QueryGroupTask.class); + public static final String QUERY_GROUP_ID_HEADER = "queryGroupId"; + public static final Supplier DEFAULT_QUERY_GROUP_ID_SUPPLIER = () -> "DEFAULT_QUERY_GROUP"; + private String queryGroupId; + + public QueryGroupTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT); + } + + public QueryGroupTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map headers, + TimeValue cancelAfterTimeInterval + ) { + super(id, type, action, description, parentTaskId, headers, cancelAfterTimeInterval); + } + + /** + * This method should always be called after calling setQueryGroupId at least once on this object + * @return task queryGroupId + */ + public final String getQueryGroupId() { + if (queryGroupId == null) { + logger.warn("QueryGroup _id can't be null; it should be set before accessing it. This is abnormal behaviour."); + } + return queryGroupId; + } + + /** + * Sets the queryGroupId from the threadContext into the task itself. + * This method exists because the queryGroupId can only be evaluated after task creation + * @param threadContext current threadContext + */ + public final void setQueryGroupId(final ThreadContext threadContext) { + this.queryGroupId = Optional.ofNullable(threadContext) + .map(threadContext1 -> threadContext1.getHeader(QUERY_GROUP_ID_HEADER)) + .orElse(DEFAULT_QUERY_GROUP_ID_SUPPLIER.get()); + } + + @Override + public boolean shouldCancelChildrenOnCancellation() { + return false; + } +} diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java new file mode 100644 index 0000000000000..848df8712549a --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementTransportInterceptor.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.wlm; + +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportInterceptor; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +/** + * This class is used to intercept search traffic requests and populate the queryGroupId header in task headers + */ +public class WorkloadManagementTransportInterceptor implements TransportInterceptor { + private final ThreadPool threadPool; + + public WorkloadManagementTransportInterceptor(ThreadPool threadPool) { + this.threadPool = threadPool; + } + + @Override + public TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + return new RequestHandler(threadPool, actualHandler); + } + + /** + * This class is mainly used to populate the queryGroupId on the task + * @param T a search-related transport request + */ + public static class RequestHandler implements TransportRequestHandler { + + private final ThreadPool threadPool; + TransportRequestHandler actualHandler; + + public RequestHandler(ThreadPool threadPool, TransportRequestHandler actualHandler) { + this.threadPool = threadPool; + this.actualHandler = actualHandler; + } + + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + if (isSearchWorkloadRequest(task)) { + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + actualHandler.messageReceived(request, channel, task); + } + + boolean isSearchWorkloadRequest(Task task) { + return task instanceof QueryGroupTask; + } + } +} diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java new file mode 100644 index 0000000000000..d292809c30124 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupTaskTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.wlm; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; + +import static org.opensearch.wlm.QueryGroupTask.DEFAULT_QUERY_GROUP_ID_SUPPLIER; +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; + +public class QueryGroupTaskTests extends OpenSearchTestCase { + private ThreadPool threadPool; + private QueryGroupTask sut; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + sut = new QueryGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testSuccessfulSetQueryGroupId() { + sut.setQueryGroupId(threadPool.getThreadContext()); + assertEquals(DEFAULT_QUERY_GROUP_ID_SUPPLIER.get(), sut.getQueryGroupId()); + + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, "akfanglkaglknag2332"); + + sut.setQueryGroupId(threadPool.getThreadContext()); + assertEquals("akfanglkaglknag2332", sut.getQueryGroupId()); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java new file mode 100644 index 0000000000000..db4e5e45d49ed --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportInterceptorTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.wlm.WorkloadManagementTransportInterceptor.RequestHandler; + +import static org.opensearch.threadpool.ThreadPool.Names.SAME; + +public class WorkloadManagementTransportInterceptorTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private WorkloadManagementTransportInterceptor sut; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + sut = new WorkloadManagementTransportInterceptor(threadPool); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testInterceptHandler() { + TransportRequestHandler requestHandler = sut.interceptHandler("Search", SAME, false, null); + assertTrue(requestHandler instanceof RequestHandler); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java new file mode 100644 index 0000000000000..789c02345e774 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementTransportRequestHandlerTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm; + +import org.opensearch.action.index.IndexRequest; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +import java.util.Collections; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; + +public class WorkloadManagementTransportRequestHandlerTests extends OpenSearchTestCase { + private WorkloadManagementTransportInterceptor.RequestHandler sut; + private ThreadPool threadPool; + + private TestTransportRequestHandler actualHandler; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()); + actualHandler = new TestTransportRequestHandler<>(); + + sut = new WorkloadManagementTransportInterceptor.RequestHandler<>(threadPool, actualHandler); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testMessageReceivedForSearchWorkload() throws Exception { + ShardSearchRequest request = mock(ShardSearchRequest.class); + QueryGroupTask spyTask = getSpyTask(); + + sut.messageReceived(request, mock(TransportChannel.class), spyTask); + assertTrue(sut.isSearchWorkloadRequest(spyTask)); + } + + public void testMessageReceivedForNonSearchWorkload() throws Exception { + IndexRequest indexRequest = mock(IndexRequest.class); + Task task = mock(Task.class); + sut.messageReceived(indexRequest, mock(TransportChannel.class), task); + assertFalse(sut.isSearchWorkloadRequest(task)); + assertEquals(1, actualHandler.invokeCount); + } + + private static QueryGroupTask getSpyTask() { + final QueryGroupTask task = new QueryGroupTask(123, "transport", "Search", "test task", null, Collections.emptyMap()); + + return spy(task); + } + + private static class TestTransportRequestHandler implements TransportRequestHandler { + int invokeCount = 0; + + @Override + public void messageReceived(TransportRequest request, TransportChannel channel, Task task) throws Exception { + invokeCount += 1; + } + }; +} From 5c19809ec05d0a2cf03a5105c5333303bc21cb0d Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Wed, 31 Jul 2024 09:50:18 +0530 Subject: [PATCH 34/68] Add setting to ignore throttling nodes for allocation of unassigned remote primaries (#14991) Signed-off-by: Gaurav Bafna --- CHANGELOG.md | 2 + .../allocator/BalancedShardsAllocator.java | 23 ++- .../allocator/LocalShardsBalancer.java | 17 +- .../common/settings/ClusterSettings.java | 1 + .../allocation/BalancedSingleShardTests.java | 15 -- .../DecideAllocateUnassignedTests.java | 154 ++++++++++++++++++ .../cluster/OpenSearchAllocationTestCase.java | 15 ++ .../cluster/routing/TestShardRouting.java | 26 +++ 8 files changed, 233 insertions(+), 20 deletions(-) create mode 100644 server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a5355f010a99f..9689e391c6df3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs 
(([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) +- Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) ### Dependencies @@ -23,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908)) - Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) ### Security diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b2443490dd973..ae173bbf06c4f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -154,6 +154,13 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + public static final Setting IGNORE_THROTTLE_FOR_REMOTE_RESTORE = Setting.boolSetting( + "cluster.routing.allocation.remote_primary.ignore_throttle", + true, + Property.Dynamic, + Property.NodeScope + ); + public static final Setting PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting( "cluster.routing.allocation.rebalance.primary.buffer", 0.10f, @@ -173,6 +180,8 @@ public class BalancedShardsAllocator implements ShardsAllocator { private volatile WeightFunction weightFunction; private volatile float threshold; + private volatile boolean ignoreThrottleInRestore; + public BalancedShardsAllocator(Settings settings) { this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @@ -182,6 +191,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings)); setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings)); setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings)); + setIgnoreThrottleInRestore(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.get(settings)); updateWeightFunction(); setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); @@ -195,6 +205,7 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + clusterSettings.addSettingsUpdateConsumer(IGNORE_THROTTLE_FOR_REMOTE_RESTORE, this::setIgnoreThrottleInRestore); } /** @@ -205,6 +216,10 @@ private void setMovePrimaryFirst(boolean movePrimaryFirst) { setShardMovementStrategy(this.shardMovementStrategy); } + private void setIgnoreThrottleInRestore(boolean ignoreThrottleInRestore) { + 
this.ignoreThrottleInRestore = ignoreThrottleInRestore; + } + /** * Sets the correct Shard movement strategy to use. * If users are still using deprecated setting `move_primary_first`, we want behavior to remain unchanged. @@ -282,7 +297,8 @@ public void allocate(RoutingAllocation allocation) { weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -304,7 +320,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f weightFunction, threshold, preferPrimaryShardBalance, - preferPrimaryShardRebalance + preferPrimaryShardRebalance, + ignoreThrottleInRestore ); AllocateUnassignedDecision allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -558,7 +575,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 00eb79add9f1d..7e4ae58548c55 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.IntroSorter; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; @@ -60,6 +61,8 @@ public class LocalShardsBalancer extends ShardsBalancer { private final boolean preferPrimaryBalance; private final boolean preferPrimaryRebalance; + + private final boolean ignoreThrottleInRestore; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; @@ -77,7 +80,8 @@ public LocalShardsBalancer( BalancedShardsAllocator.WeightFunction weight, float threshold, boolean preferPrimaryBalance, - boolean preferPrimaryRebalance + boolean preferPrimaryRebalance, + boolean ignoreThrottleInRestore ) { this.logger = logger; this.allocation = allocation; @@ -94,6 +98,7 @@ public LocalShardsBalancer( this.preferPrimaryBalance = preferPrimaryBalance; this.preferPrimaryRebalance = preferPrimaryRebalance; this.shardMovementStrategy = shardMovementStrategy; + this.ignoreThrottleInRestore = ignoreThrottleInRestore; } /** @@ -918,7 +923,15 @@ AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting shard) { nodeExplanationMap.put(node.getNodeId(), new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0)); nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight)); } - if (currentDecision.type() == Decision.Type.YES || currentDecision.type() == Decision.Type.THROTTLE) { + + // For REMOTE_STORE recoveries, THROTTLE is as good as NO as we want faster recoveries + // The side effect of this is increased relocations after these allocations.
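+ // Note: "cluster.routing.allocation.remote_primary.ignore_throttle" is registered above as a dynamic setting, + // so this behaviour can be toggled at runtime via the cluster settings API, e.g. (request shape assumed): + // PUT _cluster/settings + // { "transient": { "cluster.routing.allocation.remote_primary.ignore_throttle": false } }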
+ boolean considerThrottleAsNo = ignoreThrottleInRestore + && shard.recoverySource().getType() == RecoverySource.Type.REMOTE_STORE + && shard.primary(); + + if (currentDecision.type() == Decision.Type.YES + || (currentDecision.type() == Decision.Type.THROTTLE && considerThrottleAsNo == false)) { final boolean updateMinNode; if (currentWeight == minWeight) { /* we have an equal weight tie breaking: diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 2f60c731bc554..a73e5d44b7e02 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -268,6 +268,7 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, + BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, BreakerSettings.CIRCUIT_BREAKER_TYPE, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java index d29249cef0818..11a43019f648e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.action.support.replication.ClusterStateCreationUtils; -import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.node.DiscoveryNode; @@ -50,7 +49,6 @@ import org.opensearch.cluster.routing.allocation.decider.Decision.Type; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.snapshots.SnapshotShardSizeInfo; import java.util.Arrays; import java.util.Collections; @@ -398,19 +396,6 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca return Tuple.tuple(clusterState, rebalanceDecision); } - private RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) { - RoutingAllocation allocation = new RoutingAllocation( - deciders, - new RoutingNodes(state, false), - state, - ClusterInfo.EMPTY, - SnapshotShardSizeInfo.EMPTY, - System.nanoTime() - ); - allocation.debugDecision(true); - return allocation; - } - private void assertAssignedNodeRemainsSame( BalancedShardsAllocator allocator, RoutingAllocation routingAllocation, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java new file mode 100644 index 0000000000000..6df2ffc6149d5 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DecideAllocateUnassignedTests.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.Version; +import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.IGNORE_THROTTLE_FOR_REMOTE_RESTORE; + +public class DecideAllocateUnassignedTests extends OpenSearchAllocationTestCase { + public void testAllocateUnassignedRemoteRestore_IgnoreThrottle() { + final String[] indices = { "idx1" }; + // Create a cluster state with one index that has one started primary shard, and only + // one node initially so that all primary shards get allocated to the same node.
+ // + // When we add one more index with one started primary shard and one more node, if the new node throttles the recovery, + // the shard should get assigned on the older node if IgnoreThrottle is set to true + ClusterState clusterState = ClusterStateCreationUtils.state(1, indices, 1); + clusterState = addNodesToClusterState(clusterState, 1); + clusterState = addRestoringIndexToClusterState(clusterState, "idx2"); + List allocationDeciders = getAllocationDecidersThrottleOnNode1(); + RoutingAllocation routingAllocation = newRoutingAllocation(new AllocationDeciders(allocationDeciders), clusterState); + // run allocation and verify where the new shard got assigned + Settings build = Settings.builder().put(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.getKey(), true).build(); + BalancedShardsAllocator allocator = new BalancedShardsAllocator(build); + allocator.allocate(routingAllocation); + assertEquals(routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), "node_0"); + assertEquals(routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getIndexName(), "idx2"); + assertFalse(routingAllocation.routingNodes().hasUnassignedPrimaries()); + } + + public void testAllocateUnassignedRemoteRestore() { + final String[] indices = { "idx1" }; + // Create a cluster state with one index that has one started primary shard, and only + // one node initially so that all primary shards get allocated to the same node. + // + // When we add one more index with one started primary shard and one more node, if the new node throttles the recovery, + // the shard should remain unassigned if IgnoreThrottle is set to false + ClusterState clusterState = ClusterStateCreationUtils.state(1, indices, 1); + clusterState = addNodesToClusterState(clusterState, 1); + clusterState = addRestoringIndexToClusterState(clusterState, "idx2"); + List allocationDeciders = getAllocationDecidersThrottleOnNode1(); + RoutingAllocation routingAllocation = newRoutingAllocation(new AllocationDeciders(allocationDeciders), clusterState); + // run allocation and verify that nothing got assigned + Settings build = Settings.builder().put(IGNORE_THROTTLE_FOR_REMOTE_RESTORE.getKey(), false).build(); + BalancedShardsAllocator allocator = new BalancedShardsAllocator(build); + allocator.allocate(routingAllocation); + assertEquals(routingAllocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), 0); + assertTrue(routingAllocation.routingNodes().hasUnassignedPrimaries()); + } + + private static List getAllocationDecidersThrottleOnNode1() { + // Allocation Deciders to throttle on `node_1` + final Set throttleNodes = new HashSet<>(); + throttleNodes.add("node_1"); + AllocationDecider allocationDecider = new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (throttleNodes.contains(node.nodeId())) { + return Decision.THROTTLE; + } + return Decision.YES; + } + }; + List allocationDeciders = Arrays.asList(allocationDecider); + return allocationDeciders; + } + + private ClusterState addNodesToClusterState(ClusterState clusterState, int nodeId) { + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); + DiscoveryNode discoveryNode = newNode("node_" + nodeId); + nodesBuilder.add(discoveryNode); + return ClusterState.builder(clusterState).nodes(nodesBuilder).build(); + } + + private ClusterState addRestoringIndexToClusterState(ClusterState clusterState, String index) {
final int primaryTerm = 1 + randomInt(200); + final ShardId shardId = new ShardId(index, "_na_", 0); + + IndexMetadata indexMetadata = IndexMetadata.builder(index) + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .primaryTerm(0, primaryTerm) + .build(); + + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, null); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRoutingRemoteRestore(index, shardId, null, null, true, ShardRoutingState.UNASSIGNED, unassignedInfo) + ); + final IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build(); + + IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexMetadata); + indexMetadataBuilder.putInSyncAllocationIds( + 0, + indexShardRoutingTable.activeShards() + .stream() + .map(ShardRouting::allocationId) + .map(AllocationId::getId) + .collect(Collectors.toSet()) + ); + ClusterState.Builder state = ClusterState.builder(clusterState); + state.metadata(Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder.build(), false).generateClusterUuidIfNeeded()); + state.routingTable( + RoutingTable.builder(clusterState.routingTable()) + .add(IndexRoutingTable.builder(indexMetadata.getIndex()).addIndexShard(indexShardRoutingTable)) + .build() + ); + return state.build(); + } + +} diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java index f6113860e3907..34b8c58a9c5b2 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationTestCase.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -287,6 +288,19 @@ public static ClusterState startShardsAndReroute( return allocationService.reroute(allocationService.applyStartedShards(clusterState, initializingShards), "reroute after starting"); } + protected RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) { + RoutingAllocation allocation = new RoutingAllocation( + deciders, + new RoutingNodes(state, false), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); + allocation.debugDecision(true); + return allocation; + } + public static class TestAllocateDecision extends AllocationDecider { private final Decision decision; @@ -465,5 +479,6 @@ public void allocateUnassigned( unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes()); } } + } } diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java index f67108345550f..c7c71f0f569e5 100644 --- 
a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java @@ -205,6 +205,32 @@ public static ShardRouting newShardRouting( ); } + public static ShardRouting newShardRoutingRemoteRestore( + String index, + ShardId shardId, + String currentNodeId, + String relocatingNodeId, + boolean primary, + ShardRoutingState state, + UnassignedInfo unassignedInfo + ) { + return new ShardRouting( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + Version.V_EMPTY, + new IndexId(shardId.getIndexName(), shardId.getIndexName()) + ), + unassignedInfo, + buildAllocationId(state), + -1 + ); + } + public static ShardRouting newShardRouting( ShardId shardId, String currentNodeId, From 597747dcbf7c14513dd07887048976620164f4e0 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 31 Jul 2024 07:56:11 -0400 Subject: [PATCH 35/68] Add ThreadContextPermission for markAsSystemContext and allow core to perform the method (#15016) * Add RuntimePermission for markAsSystemContext and allow core to perform the method Signed-off-by: Craig Perkins * private Signed-off-by: Craig Perkins * Surround with doPrivileged Signed-off-by: Craig Perkins * Create ThreadContextAccess Signed-off-by: Craig Perkins * Create notion of ThreadContextPermission Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Add javadoc Signed-off-by: Craig Perkins * Add to test-framework.policy file Signed-off-by: Craig Perkins * Mark as internal Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../secure_sm/ThreadContextPermission.java | 40 ++++++++++++++++++ .../service/ClusterApplierService.java | 3 +- .../cluster/service/MasterService.java | 3 +- .../common/util/concurrent/ThreadContext.java | 17 ++++++++ .../util/concurrent/ThreadContextAccess.java | 41 +++++++++++++++++++ .../seqno/GlobalCheckpointSyncAction.java | 3 +- .../RetentionLeaseBackgroundSyncAction.java | 3 +- .../index/seqno/RetentionLeaseSyncAction.java | 3 +- .../checkpoint/PublishCheckpointAction.java | 3 +- .../transport/RemoteClusterConnection.java | 3 +- .../transport/SniffConnectionStrategy.java | 3 +- .../org/opensearch/bootstrap/security.policy | 1 + .../bootstrap/test-framework.policy | 1 + .../metadata/TemplateUpgradeServiceTests.java | 3 +- .../util/concurrent/ThreadContextTests.java | 8 ++-- ...ContextBasedTracerContextStorageTests.java | 3 +- .../org/opensearch/bootstrap/test.policy | 2 +- .../FakeThreadPoolClusterManagerService.java | 3 +- 19 files changed, 128 insertions(+), 16 deletions(-) create mode 100644 libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java create mode 100644 server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9689e391c6df3..7b49298192800 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) - Add basic aggregation support for derived fields 
([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) +- Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) ### Dependencies - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java new file mode 100644 index 0000000000000..2f33eb513c165 --- /dev/null +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/ThreadContextPermission.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.secure_sm; + +import java.security.BasicPermission; + +/** + * Permission to utilize methods in the ThreadContext class that are normally not accessible + * + * @see ThreadGroup + * @see SecureSM + */ +public final class ThreadContextPermission extends BasicPermission { + + /** + * Creates a new ThreadContextPermission object. + * + * @param name target name + */ + public ThreadContextPermission(String name) { + super(name); + } + + /** + * Creates a new ThreadContextPermission object. + * This constructor exists for use by the {@code Policy} object to instantiate new Permission objects. + * + * @param name target name + * @param actions ignored + */ + public ThreadContextPermission(String name, String actions) { + super(name, actions); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 6234427445754..b2548a8976c73 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -61,6 +61,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; @@ -396,7 +397,7 @@ private void submitStateUpdateTask( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final UpdateTask updateTask = new UpdateTask( config.priority(), source, diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 4ab8255df7658..713de8cdd0fda 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -66,6 +66,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import 
org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.Assertions; import org.opensearch.core.common.text.Text; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -1022,7 +1023,7 @@ public void submitStateUpdateTasks( final ThreadContext threadContext = threadPool.getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); List safeTasks = tasks.entrySet() .stream() diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 906a27e9f398c..b955934c4f547 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -45,11 +45,13 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.http.HttpTransportSettings; +import org.opensearch.secure_sm.ThreadContextPermission; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskThreadContextStatePropagator; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.security.Permission; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -111,6 +113,10 @@ public final class ThreadContext implements Writeable { */ public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; + // thread context permissions + + private static final Permission ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("markAsSystemContext"); + private static final Logger logger = LogManager.getLogger(ThreadContext.class); private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; @@ -554,8 +560,19 @@ boolean isDefaultContext() { /** * Marks this thread context as an internal system context. This signals that actions in this context are issued * by the system itself rather than by a user action. + * + * Usage of markAsSystemContext is guarded by a ThreadContextPermission. In order to use + * markAsSystemContext, the codebase needs to explicitly be granted permission in the JSM policy file. 
+ * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; */ public void markAsSystemContext() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION); + } threadLocal.set(threadLocal.get().setSystemContext(propagators)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java new file mode 100644 index 0000000000000..14f8b8d79bf4d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextAccess.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util.concurrent; + +import org.opensearch.SpecialPermission; +import org.opensearch.common.annotation.InternalApi; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * This class wraps the {@link ThreadContext} operations requiring access in + * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. + * + * @opensearch.internal + */ +@SuppressWarnings("removal") +@InternalApi +public final class ThreadContextAccess { + + private ThreadContextAccess() {} + + public static T doPrivileged(PrivilegedAction operation) { + SpecialPermission.check(); + return AccessController.doPrivileged(operation); + } + + public static void doPrivilegedVoid(Runnable action) { + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedAction) () -> { + action.run(); + return null; + }); + } +} diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index c6a1f5f27a875..fedf239871368 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -44,6 +44,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -98,7 +99,7 @@ public GlobalCheckpointSyncAction( public void updateGlobalCheckpointForShard(final ShardId shardId) { final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); execute(new Request(shardId), ActionListener.wrap(r -> {}, e -> { if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { logger.info(new ParameterizedMessage("{} global checkpoint sync failed", shardId), e); diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 5fa0a1a6459e7..e8ebf11ef0e5c 100644 --- 
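For reference, the call-site pattern this series applies is mechanical once the ThreadContextAccess wrapper above is in place: stash the current context, enter the system context through the doPrivileged wrapper so the SecurityManager check inside markAsSystemContext() passes for the granted codebase, and let try-with-resources restore the caller's context. A minimal illustrative sketch, not part of the patch (the ThreadPool parameter and the SystemContextSketch class are assumed here for self-containment):

    import org.opensearch.common.util.concurrent.ThreadContext;
    import org.opensearch.common.util.concurrent.ThreadContextAccess;
    import org.opensearch.threadpool.ThreadPool;

    final class SystemContextSketch {
        /** Runs an internal action under the system context, restoring the caller's context afterwards. */
        static void runAsSystem(ThreadPool threadPool, Runnable internalAction) {
            final ThreadContext threadContext = threadPool.getThreadContext();
            // stash the current (possibly user-supplied) context; it is restored when the try block closes
            try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
                // doPrivilegedVoid satisfies the SecurityManager permission check in markAsSystemContext()
                ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext);
                internalAction.run();
            }
        }
    }

The value-returning doPrivileged(PrivilegedAction) variant follows the same shape for callers that need a result back.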
a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -48,6 +48,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -122,7 +123,7 @@ final void backgroundSync(ShardId shardId, String primaryAllocationId, long prim final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final Request request = new Request(shardId, retentionLeases); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_background_sync", request); transportService.sendChildRequest( diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java index ca3c7e1d49700..9e8437ca78879 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java @@ -50,6 +50,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -137,7 +138,7 @@ final void sync( final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final Request request = new Request(shardId, retentionLeases); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "retention_lease_sync", request); transportService.sendChildRequest( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 8f39aa194b06c..d1e2884956f5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -24,6 +24,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexNotFoundException; @@ -113,7 +114,7 @@ final void 
publish(IndexShard indexShard, ReplicationCheckpoint checkpoint) { final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the sync is authorized - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); final ReplicationTimer timer = new ReplicationTimer(); diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java index 8a5f6dfffb036..8f0ee52ac3acd 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java @@ -40,6 +40,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; @@ -136,7 +137,7 @@ void collectNodes(ActionListener> listener) { new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we stash any context here since this is an internal execution and should not leak any existing context information - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); final ClusterStateRequest request = new ClusterStateRequest(); request.clear(); diff --git a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java index 07ba96b135189..1d94228218fd0 100644 --- a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java @@ -47,6 +47,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; @@ -349,7 +350,7 @@ private void collectRemoteNodes(Iterator> seedNodes, Act try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we stash any context here since this is an internal execution and should not leak any // existing context information. 
- threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); transportService.sendRequest( connection, ClusterStateAction.NAME, diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 55e8db0d9c6a3..b7aaa2e3eec48 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -48,6 +48,7 @@ grant codeBase "${codebase.opensearch}" { permission java.lang.RuntimePermission "setContextClassLoader"; // needed for SPI class loading permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; }; //// Very special jar permissions: diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index 0abfd7ef22ae7..f674c90c45a0e 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -157,4 +157,5 @@ grant { permission java.lang.RuntimePermission "reflectionFactoryAccess"; permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; }; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java index 36d984b7eb99b..562e293083633 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -47,6 +47,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -225,7 +226,7 @@ public void testUpdateTemplates() { service.upgradesInProgress.set(additionsCount + deletionsCount + 2); // +2 to skip tryFinishUpgrade final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); service.upgradeTemplates(additions, deletions); } diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 4e66575711046..4c7cd4513412d 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -565,7 +565,7 @@ public void testPreservesThreadsOriginalContextOnRunException() throws IOExcepti threadContext.putHeader("foo", "bar"); boolean systemContext = randomBoolean(); if (systemContext) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); } 
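One note on the two policy entries just added: they grant the permission to the server codebase and to the test framework only. Any other codebase that still needs to call markAsSystemContext directly, for example a hypothetical plugin, would presumably have to carry an equivalent entry in its own plugin-security.policy, following the grant form the new javadoc describes:

    grant {
        permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext";
    };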
threadContext.putTransient("foo", "bar_transient"); withContext = threadContext.preserveContext(new AbstractRunnable() { @@ -736,7 +736,7 @@ public void testMarkAsSystemContext() throws IOException { assertFalse(threadContext.isSystemContext()); try (ThreadContext.StoredContext context = threadContext.stashContext()) { assertFalse(threadContext.isSystemContext()); - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); assertTrue(threadContext.isSystemContext()); } assertFalse(threadContext.isSystemContext()); @@ -761,7 +761,7 @@ public void testSystemContextWithPropagator() { assertEquals(Integer.valueOf(1), threadContext.getTransient("test_transient_propagation_key")); assertEquals("bar", threadContext.getHeader("foo")); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); assertNull(threadContext.getHeader("foo")); assertNull(threadContext.getTransient("test_transient_propagation_key")); assertEquals("1", threadContext.getHeader("default")); @@ -793,7 +793,7 @@ public void testSerializeSystemContext() throws IOException { threadContext.writeTo(out); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { assertEquals("test", threadContext.getTransient("test_transient_propagation_key")); - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); threadContext.writeTo(outFromSystemContext); assertNull(threadContext.getHeader("foo")); assertNull(threadContext.getTransient("test_transient_propagation_key")); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java index bf11bcaf39a96..98dfc367c20f5 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContext.StoredContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.metrics.MetricsTelemetry; @@ -260,7 +261,7 @@ public void testSpanNotPropagatedToChildSystemThreadContext() { try (StoredContext ignored = threadContext.stashContext()) { assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); } } diff --git a/server/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/test/resources/org/opensearch/bootstrap/test.policy index 7b0a9b3d5d709..c2b5a8e9c0a4e 100644 --- a/server/src/test/resources/org/opensearch/bootstrap/test.policy +++ b/server/src/test/resources/org/opensearch/bootstrap/test.policy @@ -7,7 +7,7 @@ */ grant { - // allow to test 
Security policy and codebases + // allow to test Security policy and codebases permission java.util.PropertyPermission "*", "read,write"; permission java.security.SecurityPermission "createPolicy.JavaPolicy"; }; diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 53ef595c7931e..64f3dbc4fd967 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -44,6 +44,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.node.Node; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; @@ -134,7 +135,7 @@ public void run() { scheduledNextTask = false; final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); task.run(); } if (waitForPublish == false) { From d158ec6d431615b16192c34490b46752f3eb3e0f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 31 Jul 2024 11:11:28 -0400 Subject: [PATCH 36/68] Fix MacOS Mx (arm64) and Linux (arm64, ppc64le, s390x) checks (#15036) Signed-off-by: Andriy Redko --- .../internal/InternalDistributionBwcSetupPlugin.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 6892af1b17f97..0502280cb69ad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -158,7 +158,17 @@ private static List resolveArchiveProjects(File checkoutDir projects.addAll(asList("deb", "rpm")); if (bwcVersion.onOrAfter("7.0.0")) { // starting with 7.0 we bundle a jdk which means we have platform-specific archives - projects.addAll(asList("darwin-tar", "linux-tar", "windows-zip")); + projects.addAll( + asList( + "darwin-tar", + "darwin-arm64-tar", + "linux-tar", + "linux-arm64-tar", + "linux-ppc64le-tar", + "linux-s390x-tar", + "windows-zip" + ) + ); } else { // prior to 7.0 we published only a single zip and tar archives projects.addAll(asList("zip", "tar")); } From 79f45be4a544dd3519521294b63bd1630c3dfd54 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 31 Jul 2024 13:49:41 -0400 Subject: [PATCH 37/68] [Streaming Indexing] Enhance RestClient with a new streaming API support (#14437) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 4 +- client/rest/build.gradle | 67 ++- .../rest/licenses/httpclient5-5.2.1.jar.sha1 | 1 - .../rest/licenses/httpclient5-5.2.3.jar.sha1 | 1 + client/rest/licenses/httpcore5-5.2.2.jar.sha1 | 1 - client/rest/licenses/httpcore5-5.2.5.jar.sha1 | 1 + .../rest/licenses/httpcore5-h2-5.2.2.jar.sha1 | 1 - 
.../rest/licenses/httpcore5-h2-5.2.5.jar.sha1 | 1 + .../httpcore5-reactive-5.2.5.jar.sha1 | 1 + .../licenses/httpcore5-reactive-LICENSE.txt | 558 ++++++++++++++++++ .../licenses/httpcore5-reactive-NOTICE.txt | 8 + .../licenses/reactive-streams-1.0.4.jar.sha1 | 1 + .../licenses/reactive-streams-LICENSE.txt | 21 + .../rest/licenses/reactive-streams-NOTICE.txt | 0 .../licenses/reactor-core-3.5.19.jar.sha1 | 1 + client/rest/licenses/reactor-core-LICENSE.txt | 201 +++++++ client/rest/licenses/reactor-core-NOTICE.txt | 0 .../org/opensearch/client/Cancellable.java | 29 +- .../java/org/opensearch/client/Response.java | 73 +-- .../client/ResponseWarningsExtractor.java | 99 ++++ .../org/opensearch/client/RestClient.java | 291 ++++++++- .../opensearch/client/StreamingRequest.java | 114 ++++ .../opensearch/client/StreamingResponse.java | 96 +++ .../http/ReactiveHttpUriRequestProducer.java | 75 +++ .../opensearch/client/RestClientTests.java | 13 + .../licenses/httpclient5-5.2.1.jar.sha1 | 1 - .../licenses/httpclient5-5.2.3.jar.sha1 | 1 + .../sniffer/licenses/httpcore5-5.2.2.jar.sha1 | 1 - .../sniffer/licenses/httpcore5-5.2.5.jar.sha1 | 1 + plugins/transport-reactor-netty4/build.gradle | 6 +- .../rest/ReactorNetty4BadRequestIT.java | 115 ++++ .../rest/ReactorNetty4HeadBodyIsEmptyIT.java | 204 +++++++ .../rest/ReactorNetty4StreamingIT.java | 139 +++++ .../rest/ReactorNetty4StreamingStressIT.java | 95 +++ .../ReactorNetty4HttpServerTransport.java | 5 +- .../ReactorNetty4NonStreamingHttpChannel.java | 11 +- .../ReactorNetty4StreamingHttpChannel.java | 2 + ...ReactorNetty4StreamingRequestConsumer.java | 2 +- ...eactorNetty4StreamingResponseProducer.java | 6 +- qa/smoke-test-http/build.gradle | 1 + .../opensearch/http/HttpSmokeTestCase.java | 7 +- .../http/IdentityAuthenticationIT.java | 4 +- .../WEB-INF/jboss-deployment-structure.xml | 3 + .../org/opensearch/rest/RestController.java | 3 +- .../document/RestBulkStreamingAction.java | 48 +- 46 files changed, 2180 insertions(+), 134 deletions(-) delete mode 100644 client/rest/licenses/httpclient5-5.2.1.jar.sha1 create mode 100644 client/rest/licenses/httpclient5-5.2.3.jar.sha1 delete mode 100644 client/rest/licenses/httpcore5-5.2.2.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-5.2.5.jar.sha1 delete mode 100644 client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-reactive-LICENSE.txt create mode 100644 client/rest/licenses/httpcore5-reactive-NOTICE.txt create mode 100644 client/rest/licenses/reactive-streams-1.0.4.jar.sha1 create mode 100644 client/rest/licenses/reactive-streams-LICENSE.txt create mode 100644 client/rest/licenses/reactive-streams-NOTICE.txt create mode 100644 client/rest/licenses/reactor-core-3.5.19.jar.sha1 create mode 100644 client/rest/licenses/reactor-core-LICENSE.txt create mode 100644 client/rest/licenses/reactor-core-NOTICE.txt create mode 100644 client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java create mode 100644 client/rest/src/main/java/org/opensearch/client/StreamingRequest.java create mode 100644 client/rest/src/main/java/org/opensearch/client/StreamingResponse.java create mode 100644 client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java delete mode 100644 client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 create mode 100644 
client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 delete mode 100644 client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 create mode 100644 client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java create mode 100644 plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b49298192800..f63c7c5524d86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7d32ed3df7b76..eb67af909bccf 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -37,8 +37,8 @@ reactor_netty = 1.1.21 reactor = 3.5.19 # client dependencies -httpclient5 = 5.2.1 -httpcore5 = 5.2.2 +httpclient5 = 5.2.3 +httpcore5 = 5.2.5 httpclient = 4.5.14 httpcore = 4.4.16 httpasyncclient = 4.1.5 diff --git a/client/rest/build.gradle b/client/rest/build.gradle index f18df65dfddfa..93faf0024b51e 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -47,10 +47,15 @@ dependencies { api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" + api "org.apache.httpcomponents.core5:httpcore5-reactive:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.slf4j:slf4j-api:${versions.slf4j}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" @@ -93,22 +98,52 @@ testingConventions { } } -thirdPartyAudit.ignoreMissingClasses( - 'org.conscrypt.Conscrypt', - 'org.slf4j.impl.StaticLoggerBinder', - 'org.slf4j.impl.StaticMDCBinder', - 'org.slf4j.impl.StaticMarkerBinder', - //commons-logging optional dependencies - 'org.apache.avalon.framework.logger.Logger', - 
'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - 'org.apache.log4j.Level', - 'org.apache.log4j.Logger', - 'org.apache.log4j.Priority', - //commons-logging provided dependencies - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener' -) +thirdPartyAudit { + ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.context.ContextRegistry', + 'io.micrometer.context.ContextSnapshot', + 'io.micrometer.context.ContextSnapshot$Scope', + 'io.micrometer.context.ContextSnapshotFactory', + 'io.micrometer.context.ContextSnapshotFactory$Builder', + 'io.micrometer.context.ThreadLocalAccessor', + 'io.micrometer.core.instrument.Clock', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tag', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.search.Search', + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + ignoreViolations( + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + ) +} tasks.withType(JavaCompile) { // Suppressing '[options] target value 8 is obsolete and will be removed in a future release' diff --git a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.2.3.jar.sha1 b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/rest/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff 
--git a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 deleted file mode 100644 index 94bc0fa49bdb0..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54ee1ed58fe8ac40be1083ea9873a6c734939ab9 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..bb40fe65854f6 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 @@ -0,0 +1 @@ +09425df4d1365cee86a8e031a036bdca4343da4b \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ab9241fc93d45 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 @@ -0,0 +1 @@ +f68949965075b957c12b4c1ef89fd4bab2a0fdb1 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-LICENSE.txt b/client/rest/licenses/httpcore5-reactive-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore5-reactive-NOTICE.txt b/client/rest/licenses/httpcore5-reactive-NOTICE.txt new file mode 100644 index 0000000000000..fcf14beb5c1ec --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-NOTICE.txt @@ -0,0 +1,8 @@ + +Apache HttpComponents Core Reactive Extensions +Copyright 2005-2021 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + diff --git a/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 new file mode 100644 index 0000000000000..45a80e3f7e361 --- /dev/null +++ b/client/rest/licenses/reactive-streams-1.0.4.jar.sha1 @@ -0,0 +1 @@ +3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-LICENSE.txt b/client/rest/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e3c7e7c77495 --- /dev/null +++ b/client/rest/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,21 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/client/rest/licenses/reactive-streams-NOTICE.txt b/client/rest/licenses/reactive-streams-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/licenses/reactor-core-3.5.19.jar.sha1 b/client/rest/licenses/reactor-core-3.5.19.jar.sha1 new file mode 100644 index 0000000000000..04b59d2faae04 --- /dev/null +++ b/client/rest/licenses/reactor-core-3.5.19.jar.sha1 @@ -0,0 +1 @@ +1d49ce1d0df79f28d3927da5f4c46a895b94335f \ No newline at end of file diff --git a/client/rest/licenses/reactor-core-LICENSE.txt b/client/rest/licenses/reactor-core-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/client/rest/licenses/reactor-core-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client/rest/licenses/reactor-core-NOTICE.txt b/client/rest/licenses/reactor-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 56e31a3742f35..d087c60927e3e 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -34,6 +34,8 @@ import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; import org.apache.hc.core5.concurrent.CancellableDependency; +import java.io.IOException; +import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; /** @@ -77,7 +79,7 @@ public synchronized boolean cancel() { } /** - * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, @@ -95,6 +97,31 @@ synchronized void runIfNotCancelled(Runnable runnable) { runnable.run(); } + /** + * Executes some arbitrary code if the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different + * attempts of the same request. The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and + * the subsequent attempt has not been started yet. 
+     * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the
+     * {@link CancellationException}, otherwise we run the provided {@link Callable} which will reset the request and send the next attempt.
+     * Note that this method must be synchronized as well as the {@link #cancel()} method, to prevent a request from being cancelled
+     * when there is no future to cancel, which would make cancelling the request a no-op.
+     */
+    synchronized <T> T callIfNotCancelled(Callable<T> callable) throws IOException {
+        if (this.httpRequest.isCancelled()) {
+            throw newCancellationException();
+        }
+        try {
+            return callable.call();
+        } catch (final IOException ex) {
+            throw ex;
+        } catch (final Exception ex) {
+            throw new IOException(ex);
+        }
+    }
+
     static CancellationException newCancellationException() {
         return new CancellationException("request was cancelled");
     }
diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java
index b062d937ed630..cb92e33e49156 100644
--- a/client/rest/src/main/java/org/opensearch/client/Response.java
+++ b/client/rest/src/main/java/org/opensearch/client/Response.java
@@ -40,11 +40,8 @@
 import org.apache.hc.core5.http.message.RequestLine;
 import org.apache.hc.core5.http.message.StatusLine;

-import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;

 /**
  * Holds an opensearch response. It wraps the {@link HttpResponse} returned and associates it with
@@ -116,79 +113,11 @@ public HttpEntity getEntity() {
         return response.getEntity();
     }

-    /**
-     * Optimized regular expression to test if a string matches the RFC 1123 date
-     * format (with quotes and leading space). Start/end of line characters and
-     * atomic groups are used to prevent backtracking.
-     */
-    private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space
-        // quoted RFC 1123 date format
-        "\"" + // opening quote
-        "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking
-        "\\d{2} " + // 2-digit day
-        "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking
-        "\\d{4} " + // 4-digit year
-        "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second)
-        "GMT" + // GMT
-        "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line
-
-    /**
-     * Length of RFC 1123 format (with quotes and leading space), used in
-     * matchWarningHeaderPatternByPrefix(String).
-     */
-    private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1;
-
-    /**
-     * Tests if a string matches the RFC 7234 specification for warning headers.
-     * This assumes that the warn code is always 299 and the warn agent is always
-     * OpenSearch.
-     *
-     * @param s the value of a warning header formatted according to RFC 7234
-     * @return {@code true} if the input string matches the specification
-     */
-    private static boolean matchWarningHeaderPatternByPrefix(final String s) {
-        return s.startsWith("299 OpenSearch-");
-    }
-
-    /**
-     * Refer to org.opensearch.common.logging.DeprecationLogger
-     */
-    private static String extractWarningValueFromWarningHeader(final String s) {
-        String warningHeader = s;
-
-        /*
-         * The following block tests for the existence of a RFC 1123 date in the warning header.
If the date exists, it is removed for - * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). - */ - if (s.length() > WARNING_HEADER_DATE_LENGTH) { - final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); - final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); - - if (matcher.matches()) { - warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); - } - } - - final int firstQuote = warningHeader.indexOf('\"'); - final int lastQuote = warningHeader.length() - 1; - final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); - return warningValue; - } - /** * Returns a list of all warning headers returned in the response. */ public List getWarnings() { - List warnings = new ArrayList<>(); - for (Header header : response.getHeaders("Warning")) { - String warning = header.getValue(); - if (matchWarningHeaderPatternByPrefix(warning)) { - warnings.add(extractWarningValueFromWarningHeader(warning)); - } else { - warnings.add(warning); - } - } - return warnings; + return ResponseWarningsExtractor.getWarnings(response); } /** diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java new file mode 100644 index 0000000000000..441daff4f3af4 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/ResponseWarningsExtractor.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpResponse; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +final class ResponseWarningsExtractor { + + /** + * Optimized regular expression to test if a string matches the RFC 1123 date + * format (with quotes and leading space). Start/end of line characters and + * atomic groups are used to prevent backtracking. + */ + private static final Pattern WARNING_HEADER_DATE_PATTERN = Pattern.compile("^ " + // start of line, leading space + // quoted RFC 1123 date format + "\"" + // opening quote + "(?>Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // day of week, atomic group to prevent backtracking + "\\d{2} " + // 2-digit day + "(?>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) " + // month, atomic group to prevent backtracking + "\\d{4} " + // 4-digit year + "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) + "GMT" + // GMT + "\"$"); // closing quote (optional, since an older version can still send a warn-date), end of line + + /** + * Length of RFC 1123 format (with quotes and leading space), used in + * matchWarningHeaderPatternByPrefix(String). + */ + private static final int WARNING_HEADER_DATE_LENGTH = 0 + 1 + 1 + 3 + 1 + 1 + 2 + 1 + 3 + 1 + 4 + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 3 + 1; + + private ResponseWarningsExtractor() {} + + /** + * Returns a list of all warning headers returned in the response. 
+ * @param response HTTP response + */ + static List getWarnings(final HttpResponse response) { + List warnings = new ArrayList<>(); + for (Header header : response.getHeaders("Warning")) { + String warning = header.getValue(); + if (matchWarningHeaderPatternByPrefix(warning)) { + warnings.add(extractWarningValueFromWarningHeader(warning)); + } else { + warnings.add(warning); + } + } + return warnings; + } + + /** + * Tests if a string matches the RFC 7234 specification for warning headers. + * This assumes that the warn code is always 299 and the warn agent is always + * OpenSearch. + * + * @param s the value of a warning header formatted according to RFC 7234 + * @return {@code true} if the input string matches the specification + */ + private static boolean matchWarningHeaderPatternByPrefix(final String s) { + return s.startsWith("299 OpenSearch-"); + } + + /** + * Refer to org.opensearch.common.logging.DeprecationLogger + */ + private static String extractWarningValueFromWarningHeader(final String s) { + String warningHeader = s; + + /* + * The following block tests for the existence of a RFC 1123 date in the warning header. If the date exists, it is removed for + * extractWarningValueFromWarningHeader(String) to work properly (as it does not handle dates). + */ + if (s.length() > WARNING_HEADER_DATE_LENGTH) { + final String possibleDateString = s.substring(s.length() - WARNING_HEADER_DATE_LENGTH); + final Matcher matcher = WARNING_HEADER_DATE_PATTERN.matcher(possibleDateString); + + if (matcher.matches()) { + warningHeader = warningHeader.substring(0, s.length() - WARNING_HEADER_DATE_LENGTH); + } + } + + final int firstQuote = warningHeader.indexOf('\"'); + final int lastQuote = warningHeader.length() - 1; + final String warningValue = warningHeader.substring(firstQuote + 1, lastQuote); + return warningValue; + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 15905add76c4f..5c87e3fda5701 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -62,14 +62,19 @@ import org.apache.hc.core5.http.HttpEntity; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.Message; import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.nio.AsyncRequestProducer; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactive.ReactiveResponseConsumer; import org.apache.hc.core5.reactor.IOReactorStatus; import org.apache.hc.core5.util.Args; import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.http.ReactiveHttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; @@ -83,6 +88,7 @@ import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; @@ -98,6 +104,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import 
java.util.concurrent.ExecutionException; @@ -106,6 +113,10 @@ import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; @@ -300,6 +311,23 @@ public boolean isRunning() { return client.getStatus() == IOReactorStatus.ACTIVE; } + /** + * Sends a streaming request to the OpenSearch cluster that the client points to and returns streaming response. This is an experimental API. + * @param request streaming request + * @return streaming response + * @throws IOException IOException + */ + public StreamingResponse streamRequest(StreamingRequest request) throws IOException { + final InternalStreamingRequest internalRequest = new InternalStreamingRequest(request); + + final StreamingResponse response = new StreamingResponse<>( + new RequestLine(internalRequest.httpRequest), + streamRequest(nextNodes(), internalRequest) + ); + + return response; + } + /** * Sends a request to the OpenSearch cluster that the client points to. * Blocks until the request is completed and returns its response or fails @@ -332,13 +360,13 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { - RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); ClassicHttpResponse httpResponse; try { - httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); + httpResponse = client.execute(context.requestProducer(), context.asyncResponseConsumer(), context.context(), null).get(); } catch (Exception e) { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, e); - onFailure(context.node); + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), e); + onFailure(context.node()); Exception cause = extractAndWrapCause(e); addSuppressedException(previousException, cause); if (nodeTuple.nodes.hasNext()) { @@ -352,7 +380,7 @@ private Response performRequest(final NodeTuple> nodeTuple, final } throw new IllegalStateException("unexpected exception type: must be either RuntimeException or IOException", cause); } - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node(), httpResponse); if (responseOrResponseException.responseException == null) { return responseOrResponseException.response; } @@ -363,6 +391,46 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } + private Publisher>> streamRequest( + final NodeTuple> nodeTuple, + final InternalStreamingRequest request + ) throws IOException { + return request.cancellable.callIfNotCancelled(() -> { + final Node node = nodeTuple.nodes.next(); + + final Mono>> publisher = Mono.create(emitter -> { + final RequestContext context = request.createContextForNextAttempt(node, nodeTuple.authCache, emitter); + final Future future = client.execute( + context.requestProducer(), + context.asyncResponseConsumer(), + context.context(), + null + 
); + + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } + }); + + return publisher.flatMap(message -> { + try { + final ResponseOrResponseException responseOrResponseException = convertResponse(request, node, message); + if (responseOrResponseException.responseException == null) { + return Mono.just(message); + } else { + if (nodeTuple.nodes.hasNext()) { + return Mono.from(streamRequest(nodeTuple, request)); + } else { + return Mono.error(responseOrResponseException.responseException); + } + } + } catch (final Exception ex) { + return Mono.error(ex); + } + }); + }); + } + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); @@ -393,6 +461,40 @@ private ResponseOrResponseException convertResponse(InternalRequest request, Nod throw responseException; } + private ResponseOrResponseException convertResponse( + InternalStreamingRequest request, + Node node, + Message> message + ) throws IOException { + + // Streaming Response could accumulate a lot of data so we may not be able to fully consume it. + final ClassicHttpResponse httpResponse = new BasicClassicHttpResponse( + message.getHead().getCode(), + message.getHead().getReasonPhrase() + ); + final Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); + + RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); + int statusCode = httpResponse.getCode(); + + if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { + onResponse(node); + if (request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { + throw new WarningFailureException(response); + } + return new ResponseOrResponseException(response); + } + ResponseException responseException = new ResponseException(response); + if (isRetryStatus(statusCode)) { + // mark host dead and retry against next one + onFailure(node); + return new ResponseOrResponseException(responseException); + } + // mark host alive and don't retry, as the error should be a request problem + onResponse(node); + throw responseException; + } + /** * Sends a request to the OpenSearch cluster that the client points to. 
* The request is executed asynchronously and the provided @@ -427,16 +529,23 @@ private void performRequestAsync( final FailureTrackingResponseListener listener ) { request.cancellable.runIfNotCancelled(() -> { - final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + final RequestContext context = request.createContextForNextAttempt( + nodeTuple.nodes.next(), + nodeTuple.authCache + ); Future future = client.execute( - context.requestProducer, - context.asyncResponseConsumer, - context.context, + context.requestProducer(), + context.asyncResponseConsumer(), + context.context(), new FutureCallback() { @Override public void completed(ClassicHttpResponse httpResponse) { try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + ResponseOrResponseException responseOrResponseException = convertResponse( + request, + context.node(), + httpResponse + ); if (responseOrResponseException.responseException == null) { listener.onSuccess(responseOrResponseException.response); } else { @@ -455,8 +564,8 @@ public void completed(ClassicHttpResponse httpResponse) { @Override public void failed(Exception failure) { try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node(), failure); + onFailure(context.node()); if (nodeTuple.nodes.hasNext()) { listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); @@ -822,6 +931,66 @@ public void remove() { } } + private class InternalStreamingRequest { + private final StreamingRequest request; + private final Set ignoreErrorCodes; + private final HttpUriRequestBase httpRequest; + private final Cancellable cancellable; + private final WarningsHandler warningsHandler; + + InternalStreamingRequest(StreamingRequest request) { + this.request = request; + Map params = new HashMap<>(request.getParameters()); + // ignore is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = params.remove("ignore"); + this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); + URI uri = buildUri(pathPrefix, request.getEndpoint(), params); + this.httpRequest = createHttpRequest(request.getMethod(), uri, null); + this.cancellable = Cancellable.fromRequest(httpRequest); + setHeaders(httpRequest, request.getOptions().getHeaders()); + setRequestConfig(httpRequest, request.getOptions().getRequestConfig()); + this.warningsHandler = request.getOptions().getWarningsHandler() == null + ? RestClient.this.warningsHandler + : request.getOptions().getWarningsHandler(); + } + + private void setHeaders(HttpRequest httpRequest, Collection
requestHeaders) { + // request headers override default headers, so we don't add default headers if they exist as request headers + final Set requestNames = new HashSet<>(requestHeaders.size()); + for (Header requestHeader : requestHeaders) { + httpRequest.addHeader(requestHeader); + requestNames.add(requestHeader.getName()); + } + for (Header defaultHeader : defaultHeaders) { + if (requestNames.contains(defaultHeader.getName()) == false) { + httpRequest.addHeader(defaultHeader); + } + } + if (compressionEnabled) { + httpRequest.addHeader("Accept-Encoding", "gzip"); + } + } + + private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) { + if (requestConfig != null) { + httpRequest.setConfig(requestConfig); + } + } + + public Publisher getPublisher() { + return request.getBody(); + } + + RequestContext createContextForNextAttempt( + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.httpRequest.reset(); + return new ReactiveRequestContext(this, node, authCache, emitter); + } + } + private class InternalRequest { private final Request request; private final Set ignoreErrorCodes; @@ -868,12 +1037,22 @@ private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requ } } - RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { + RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { this.httpRequest.reset(); - return new RequestContext(this, node, authCache); + return new AsyncRequestContext(this, node, authCache); } } + private interface RequestContext { + Node node(); + + AsyncRequestProducer requestProducer(); + + AsyncResponseConsumer asyncResponseConsumer(); + + HttpClientContext context(); + } + /** * The Apache HttpClient 5 adds "Authorization" header even if the credentials for basic authentication are not provided * (effectively, username and password are 'null'). 
To workaround that, wrapping the AuthCache around current HttpClientContext @@ -934,13 +1113,73 @@ public void clear() { } - private static class RequestContext { + private static class ReactiveRequestContext implements RequestContext { + private final Node node; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; + private final HttpClientContext context; + + ReactiveRequestContext( + InternalStreamingRequest request, + Node node, + AuthCache authCache, + MonoSink>> emitter + ) { + this.node = node; + // we stream the request body if the entity allows for it + this.requestProducer = ReactiveHttpUriRequestProducer.create(request.httpRequest, node.getHost(), request.getPublisher()); + this.asyncResponseConsumer = new ReactiveResponseConsumer(new FutureCallback>>() { + @Override + public void failed(Exception ex) { + emitter.error(ex); + } + + @Override + public void completed(Message> result) { + if (result == null) { + emitter.success(); + } else { + emitter.success(result); + } + } + + @Override + public void cancelled() { + failed(new CancellationException("Future cancelled")); + } + }); + this.context = HttpClientContext.create(); + context.setAuthCache(new WrappingAuthCache(context, authCache)); + } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } + } + + private static class AsyncRequestContext implements RequestContext { private final Node node; private final AsyncRequestProducer requestProducer; private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; - RequestContext(InternalRequest request, Node node, AuthCache authCache) { + AsyncRequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); @@ -950,6 +1189,26 @@ private static class RequestContext { this.context = HttpClientContext.create(); context.setAuthCache(new WrappingAuthCache(context, authCache)); } + + @Override + public AsyncResponseConsumer asyncResponseConsumer() { + return asyncResponseConsumer; + } + + @Override + public HttpClientContext context() { + return context; + } + + @Override + public Node node() { + return node; + } + + @Override + public AsyncRequestProducer requestProducer() { + return requestProducer; + } } private static Set getIgnoreErrorCodes(String ignoreString, String requestMethod) { diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java new file mode 100644 index 0000000000000..e1767407b1353 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/StreamingRequest.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.client;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import org.reactivestreams.Publisher;
+
+import static java.util.Collections.unmodifiableMap;
+
+/**
+ * HTTP Streaming Request to OpenSearch. This is an experimental API.
+ */
+public class StreamingRequest<T> {
+    private final String method;
+    private final String endpoint;
+    private final Map<String, String> parameters = new HashMap<>();
+
+    private RequestOptions options = RequestOptions.DEFAULT;
+    private final Publisher<T> publisher;
+
+    /**
+     * Constructor
+     * @param method method
+     * @param endpoint endpoint
+     * @param publisher publisher
+     */
+    public StreamingRequest(String method, String endpoint, Publisher<T> publisher) {
+        this.method = method;
+        this.endpoint = endpoint;
+        this.publisher = publisher;
+    }
+
+    /**
+     * Get endpoint
+     * @return endpoint
+     */
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    /**
+     * Get method
+     * @return method
+     */
+    public String getMethod() {
+        return method;
+    }
+
+    /**
+     * Get options
+     * @return options
+     */
+    public RequestOptions getOptions() {
+        return options;
+    }
+
+    /**
+     * Get parameters
+     * @return parameters
+     */
+    public Map<String, String> getParameters() {
+        if (options.getParameters().isEmpty()) {
+            return unmodifiableMap(parameters);
+        } else {
+            Map<String, String> combinedParameters = new HashMap<>(parameters);
+            combinedParameters.putAll(options.getParameters());
+            return unmodifiableMap(combinedParameters);
+        }
+    }
+
+    /**
+     * Add a query string parameter.
+     * @param name the name of the url parameter. Must not be null.
+     * @param value the value of the url parameter. If {@code null} then
+     *        the parameter is sent as {@code name} rather than {@code name=value}
+     * @throws IllegalArgumentException if a parameter with that name has
+     *         already been set
+     */
+    public void addParameter(String name, String value) {
+        Objects.requireNonNull(name, "url parameter name cannot be null");
+        if (parameters.containsKey(name)) {
+            throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]");
+        } else {
+            parameters.put(name, value);
+        }
+    }
+
+    /**
+     * Add query parameters using the provided map of key value pairs.
+     *
+     * @param paramSource a map of key value pairs where the key is the name of the url parameter.
+     * @throws IllegalArgumentException if a parameter with that name has already been set.
+     */
+    public void addParameters(Map<String, String> paramSource) {
+        paramSource.forEach(this::addParameter);
+    }
+
+    /**
+     * Body publisher
+     * @return body publisher
+     */
+    public Publisher<T> getBody() {
+        return publisher;
+    }
+}
diff --git a/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java
new file mode 100644
index 0000000000000..87d404c115723
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/StreamingResponse.java
@@ -0,0 +1,96 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.client;
+
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.hc.core5.http.HttpResponse;
+import org.apache.hc.core5.http.Message;
+import org.apache.hc.core5.http.message.RequestLine;
+import org.apache.hc.core5.http.message.StatusLine;
+
+import java.util.List;
+
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+/**
+ * HTTP Streaming Response from OpenSearch. This is an experimental API.
+ */
+public class StreamingResponse<T> {
+    private final RequestLine requestLine;
+    private final Mono<Message<HttpResponse, Publisher<T>>> publisher;
+    private volatile HttpHost host;
+
+    /**
+     * Constructor
+     * @param requestLine request line
+     * @param publisher message publisher (response with a body)
+     */
+    public StreamingResponse(RequestLine requestLine, Publisher<Message<HttpResponse, Publisher<T>>> publisher) {
+        this.requestLine = requestLine;
+        // We cache the publisher here so the body and/or the HttpResponse can
+        // be consumed independently and more than once.
+        this.publisher = Mono.from(publisher).cache();
+    }
+
+    /**
+     * Set host
+     * @param host host
+     */
+    public void setHost(HttpHost host) {
+        this.host = host;
+    }
+
+    /**
+     * Get request line
+     * @return request line
+     */
+    public RequestLine getRequestLine() {
+        return requestLine;
+    }
+
+    /**
+     * Get host
+     * @return host
+     */
+    public HttpHost getHost() {
+        return host;
+    }
+
+    /**
+     * Get response body {@link Publisher}
+     * @return response body {@link Publisher}
+     */
+    public Publisher<T> getBody() {
+        return publisher.flatMapMany(m -> Flux.from(m.getBody()));
+    }
+
+    /**
+     * Returns the status line of the current response
+     */
+    public StatusLine getStatusLine() {
+        return new StatusLine(
+            publisher.map(Message::getHead)
+                .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse()))
+                .block()
+        );
+    }
+
+    /**
+     * Returns a list of all warning headers returned in the response.
+     */
+    public List<String> getWarnings() {
+        return ResponseWarningsExtractor.getWarnings(
+            publisher.map(Message::getHead)
+                .onErrorResume(ResponseException.class, e -> Mono.just(e.getResponse().getHttpResponse()))
+                .block()
+        );
+    }
+}
diff --git a/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java
new file mode 100644
index 0000000000000..63a71e29b8b31
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/http/ReactiveHttpUriRequestProducer.java
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.reactive.ReactiveEntityProducer; +import org.apache.hc.core5.util.Args; + +import java.nio.ByteBuffer; + +import org.reactivestreams.Publisher; + +/** + * The reactive producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class ReactiveHttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + ReactiveHttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @param publisher publisher + * @return new request producer + */ + public static ReactiveHttpUriRequestProducer create( + final HttpUriRequestBase request, + final HttpHost host, + Publisher publisher + ) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final Header contentTypeHeader = request.getFirstHeader("Content-Type"); + final ContentType contentType = (contentTypeHeader == null) + ? ContentType.APPLICATION_JSON + : ContentType.parse(contentTypeHeader.getValue()); + + final Header contentEncodingHeader = request.getFirstHeader("Content-Encoding"); + final String contentEncoding = (contentEncodingHeader == null) ? 
null : contentEncodingHeader.getValue(); + + final AsyncEntityProducer entityProducer = new ReactiveEntityProducer(publisher, -1, contentType, contentEncoding); + return new ReactiveHttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index dd51da3a30d8c..f4f1c57cdd588 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -56,12 +56,15 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; +import reactor.core.publisher.Mono; + import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -418,6 +421,16 @@ public void testIsRunning() { assertFalse(restClient.isRunning()); } + public void testStreamWithUnsupportedMethod() throws Exception { + try (RestClient restClient = createRestClient()) { + final UnsupportedOperationException ex = assertThrows( + UnsupportedOperationException.class, + () -> restClient.streamRequest(new StreamingRequest<>("unsupported", randomAsciiLettersOfLength(5), Mono.empty())) + ); + assertEquals("http method not supported: unsupported", ex.getMessage()); + } + } + private static void assertNodes(NodeTuple> nodeTuple, AtomicInteger lastNodeIndex, int runs) throws IOException { int distance = lastNodeIndex.get() % nodeTuple.nodes.size(); /* diff --git a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 deleted file mode 100644 index 3555fe22f8e12..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..43e233e72001a --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.2.3.jar.sha1 @@ -0,0 +1 @@ +5d753a99d299756998a08c488f2efdf9cf26198e \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 deleted file mode 100644 index b641256c7d4a4..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..ca97e8612ea39 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 @@ -0,0 +1 @@ +dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 1a94def3fdff1..089e57f062a9f 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -46,7 +46,7 @@ dependencies { api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" testImplementation 
"org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" - testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + javaRestTestImplementation "io.projectreactor:reactor-test:${versions.reactor}" testImplementation project(":modules:transport-netty4") } @@ -80,6 +80,10 @@ javaRestTest { systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' } +testClusters.javaRestTest { + setting 'http.type', 'reactor-netty4' +} + thirdPartyAudit { ignoreMissingClasses( 'com.aayushatharva.brotli4j.Brotli4jLoader', diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java new file mode 100644 index 0000000000000..62834483b5e9b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4BadRequestIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.RequestOptions; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Map; + +import static org.opensearch.core.rest.RestStatus.REQUEST_URI_TOO_LONG; +import static org.hamcrest.Matchers.equalTo; + +public class ReactorNetty4BadRequestIT extends OpenSearchRestTestCase { + + public void testBadRequest() throws IOException { + final Response response = client().performRequest(new Request("GET", "/_nodes/settings")); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("nodes"); + int maxMaxInitialLineLength = Integer.MIN_VALUE; + final Setting httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + final String key = httpMaxInitialLineLength.getKey().substring("http.".length()); + for (Map.Entry entry : map.entrySet()) { + @SuppressWarnings("unchecked") + final Map settings = (Map) ((Map) entry.getValue()).get("settings"); + final int maxInitialLineLength; + if (settings.containsKey("http")) { + @SuppressWarnings("unchecked") + final Map httpSettings = (Map) settings.get("http"); + if (httpSettings.containsKey(key)) { + maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String) httpSettings.get(key), key).bytesAsInt(); + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + } else { + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + } + maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength); + } + + final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a'); + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request(randomFrom("GET", "POST", "PUT"), path)) + ); + // The reactor-netty implementation does not provide a hook to customize or intercept request decoder errors at the moment (see + // https://github.com/reactor/reactor-netty/issues/3327).
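+        // Without such a hook there is no structured error body to assert on, so this test only verifies
+        // that the expected 414 (REQUEST_URI_TOO_LONG) status code is propagated back to the client.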
+ assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(REQUEST_URI_TOO_LONG.getStatus())); + } + + public void testInvalidParameterValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + request.addParameter("pretty", "neither-true-nor-false"); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final Request request = new Request("GET", "/_cluster/settings"); + final RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Content-Type", "\t"); + request.setOptions(options); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java new file mode 100644 index 0000000000000..663eb9ef6e946 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4HeadBodyIsEmptyIT.java @@ -0,0 +1,204 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.rest.RestStatus.NOT_FOUND; +import static org.opensearch.core.rest.RestStatus.OK; +import static org.hamcrest.Matchers.greaterThan; + +public class ReactorNetty4HeadBodyIsEmptyIT extends OpenSearchRestTestCase { + public void testHeadRoot() throws IOException { + headTestCase("/", emptyMap(), greaterThan(0)); + headTestCase("/", singletonMap("pretty", ""), greaterThan(0)); + headTestCase("/", singletonMap("pretty", "true"), greaterThan(0)); + } + + private void createTestDoc() throws IOException { + createTestDoc("test"); + } + + private void createTestDoc(final String indexName) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("test", "test"); + } + builder.endObject(); + Request request = new Request("PUT", "/" + indexName + "/_doc/" + "1"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + } + } + + public void testDocumentExists() throws IOException { + createTestDoc(); + headTestCase("/test/_doc/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_doc/1", singletonMap("pretty", "true"), greaterThan(0)); + headTestCase("/test/_doc/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testIndexExists() throws IOException { + createTestDoc(); + headTestCase("/test", emptyMap(), greaterThan(0)); + headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); + } + + public void testAliasExists() throws IOException { + createTestDoc(); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startArray("actions"); + { + builder.startObject(); + { + builder.startObject("add"); + { + builder.field("index", "test"); + builder.field("alias", "test_alias"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + Request request = new Request("POST", "/_aliases"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_alias/test_alias", emptyMap(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), greaterThan(0)); + } + } + + public void testAliasDoesNotExist() throws IOException { + createTestDoc(); + headTestCase("/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + headTestCase("/test/_alias/test_alias", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + public void testTemplateExists() throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.array("index_patterns", "*"); + builder.startObject("settings"); + { + builder.field("number_of_replicas", 0); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/_template/template"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + headTestCase("/_template/template", emptyMap(), greaterThan(0)); + } + } + + public void testGetSourceAction() throws IOException { + createTestDoc(); + 
headTestCase("/test/_source/1", emptyMap(), greaterThan(0)); + headTestCase("/test/_source/2", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("mappings"); + { + builder.startObject("_source"); + { + builder.field("enabled", false); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + Request request = new Request("PUT", "/test-no-source"); + request.setJsonEntity(builder.toString()); + client().performRequest(request); + createTestDoc("test-no-source"); + headTestCase("/test-no-source/_source/1", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + } + + public void testException() throws IOException { + /* + * This will throw an index not found exception which will be sent on the channel; previously when handling HEAD requests that would + * throw an exception, the content was swallowed and a content length header of zero was returned. Instead of swallowing the content + * we now let it rise up to the upstream channel so that it can compute the content length that would be returned. This test case is + * a test for this situation. + */ + headTestCase("/index-not-found-exception", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); + } + + private void headTestCase(final String url, final Map params, final Matcher matcher) throws IOException { + headTestCase(url, params, OK.getStatus(), matcher); + } + + private void headTestCase( + final String url, + final Map params, + final int expectedStatusCode, + final Matcher matcher, + final String... expectedWarnings + ) throws IOException { + Request request = new Request("HEAD", url); + for (Map.Entry param : params.entrySet()) { + request.addParameter(param.getKey(), param.getValue()); + } + request.setOptions(expectWarnings(expectedWarnings)); + Response response = client().performRequest(request); + assertEquals(expectedStatusCode, response.getStatusLine().getStatusCode()); + assertThat(Integer.valueOf(response.getHeader("Content-Length")), matcher); + assertNull("HEAD requests shouldn't have a response body but " + url + " did", response.getEntity()); + } + +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java new file mode 100644 index 0000000000000..c564e289e3f88 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingIT.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; +import org.junit.After; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; +import reactor.test.scheduler.VirtualTimeScheduler; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testStreamingRequest() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream stream = IntStream.range(1, 6) + .mapToObj(id -> "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"" + id + "\" } }\n" + "{ \"name\": \"josh\" }\n"); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"2\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"3\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"4\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"5\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + + final Request request = new Request("GET", "/test-streaming/_count"); + final Response response = client().performRequest(request); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Integer count = objectPath.evaluate("count"); + assertThat(count, equalTo(5)); + } + + public void testStreamingBadRequest() throws IOException { + final Stream stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n" + ); + + final 
StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "not-supported-policy"); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectErrorMatches( + ex -> ex instanceof ResponseException && ((ResponseException) ex).getResponse().getStatusLine().getStatusCode() == 400 + ) + .verify(Duration.ofSeconds(10)); + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(400)); + assertThat(streamingResponse.getWarnings(), empty()); + } + + public void testStreamingBadStream() throws IOException { + final VirtualTimeScheduler scheduler = VirtualTimeScheduler.create(true); + + final Stream stream = Stream.of( + "{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n" + "{ \"name\": \"josh\" }\n", + "{ \"name\": \"josh\" }\n" + ); + + final Duration delay = Duration.ofMillis(1); + final StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(delay, scheduler).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + scheduler.advanceTimeBy(delay); /* emit first element */ + + StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8))) + .expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectNextMatches(s -> s.contains("\"type\":\"illegal_argument_exception\"")) + .then(() -> scheduler.advanceTimeBy(delay)) + .expectComplete() + .verify(); + + assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(streamingResponse.getWarnings(), empty()); + } +} diff --git a/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java new file mode 100644 index 0000000000000..a978af1b11db4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/javaRestTest/java/org/opensearch/rest/ReactorNetty4StreamingStressIT.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest; + +import org.apache.hc.core5.http.ConnectionClosedException; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.StreamingRequest; +import org.opensearch.client.StreamingResponse; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.After; + +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; + +import reactor.core.publisher.Flux; +import reactor.test.subscriber.TestSubscriber; + +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.collection.IsEmptyCollection.empty; + +public class ReactorNetty4StreamingStressIT extends OpenSearchRestTestCase { + @After + @Override + public void tearDown() throws Exception { + final Request request = new Request("DELETE", "/test-stress-streaming"); + request.addParameter("ignore_unavailable", "true"); + + final Response response = adminClient().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + super.tearDown(); + } + + public void testCloseClientStreamingRequest() throws Exception { + final AtomicInteger id = new AtomicInteger(0); + final Stream stream = Stream.generate( + () -> "{ \"index\": { \"_index\": \"test-stress-streaming\", \"_id\": \"" + + id.incrementAndGet() + + "\" } }\n" + + "{ \"name\": \"josh\" }\n" + ); + + final StreamingRequest streamingRequest = new StreamingRequest<>( + "POST", + "/_bulk/stream", + Flux.fromStream(stream).delayElements(Duration.ofMillis(500)).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + ); + streamingRequest.addParameter("refresh", "true"); + + final StreamingResponse streamingResponse = client().streamRequest(streamingRequest); + TestSubscriber subscriber = TestSubscriber.create(); + streamingResponse.getBody().subscribe(subscriber); + + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + // Wait for the subscriber to receive at least one chunk + assertBusy(() -> assertThat(subscriber.getReceivedOnNext(), not(empty()))); + + // Close the client forcibly + executor.schedule(() -> { + client().close(); + return null; + }, 2, TimeUnit.SECONDS); + + // Wait for the subscriber to terminate + subscriber.block(Duration.ofSeconds(10)); + assertThat( + subscriber.expectTerminalError(), + anyOf(instanceOf(InterruptedIOException.class), instanceOf(ConnectionClosedException.class)) + ); + } finally { + executor.shutdown(); + if (executor.awaitTermination(1, TimeUnit.SECONDS) == false) { + executor.shutdownNow(); + } + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java index 906bbfd072da8..7f4a8f6cdef02 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java +++ 
b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -44,6 +44,7 @@ import java.util.List; import java.util.Optional; +import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.channel.ChannelOption; import io.netty.channel.socket.nio.NioChannelOption; @@ -390,7 +391,9 @@ protected Publisher incomingRequest(HttpServerRequest request, HttpServerR response.chunkedTransfer(false); response.compression(true); r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); - return Mono.from(response.sendObject(r.content())); + + final ByteBuf content = r.content().copy(); + return Mono.from(response.sendObject(content)); }); } } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java index 7df0b3c0c35fe..3dae2d57cf6a6 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java @@ -55,9 +55,14 @@ public void addCloseListener(ActionListener listener) { @Override public void sendResponse(HttpResponse response, ActionListener listener) { - emitter.next(createResponse(response)); - listener.onResponse(null); - emitter.complete(); + try { + emitter.next(createResponse(response)); + listener.onResponse(null); + emitter.complete(); + } catch (final Exception ex) { + emitter.error(ex); + listener.onFailure(ex); + } } @Override diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java index 56dadea0477c5..1aa03aa9967e2 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java @@ -101,6 +101,8 @@ public void receiveChunk(HttpChunk message) { lastChunkReceived = true; producer.complete(); } + } catch (final Exception ex) { + producer.error(ex); } finally { message.close(); } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java index f34f54e561021..8ed6710c8a1e3 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java @@ -44,7 +44,7 @@ public void subscribe(Subscriber s) { } HttpChunk createChunk(HttpContent chunk, boolean last) { - return new ReactorNetty4HttpChunk(chunk.content().retain(), last); + return new ReactorNetty4HttpChunk(chunk.copy().content(), last); } StreamingHttpChannel httpChannel() { diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java 
b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java index 616edccdfc396..6aaccc500072b 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java @@ -21,7 +21,11 @@ class ReactorNetty4StreamingResponseProducer implements StreamingHttpContentSend private volatile FluxSink emitter; ReactorNetty4StreamingResponseProducer() { - this.sender = Flux.create(emitter -> this.emitter = emitter); + this.sender = Flux.create(emitter -> register(emitter)); + } + + private void register(FluxSink emitter) { + this.emitter = emitter; } @Override diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index f48ddc26d929b..496fda6bb717d 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -35,6 +35,7 @@ apply plugin: 'opensearch.test-with-dependencies' dependencies { testImplementation project(path: ':modules:transport-netty4') // for http + testImplementation project(path: ':plugins:transport-reactor-netty4') // for http testImplementation project(path: ':plugins:transport-nio') testImplementation project(path: ':plugins:identity-shiro') // for http } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java index 08974b902c418..6d8e80a0a63ea 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java @@ -38,6 +38,7 @@ import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.MockNioTransportPlugin; import org.opensearch.transport.nio.NioTransportPlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; import org.junit.BeforeClass; import java.util.Arrays; @@ -53,7 +54,7 @@ public abstract class HttpSmokeTestCase extends OpenSearchIntegTestCase { @BeforeClass public static void setUpTransport() { nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); - nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class)); + nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class)); clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); } @@ -71,6 +72,8 @@ private static String getTypeKey(Class clazz) { private static String getHttpTypeKey(Class clazz) { if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME; + } else if (clazz.equals(ReactorNetty4Plugin.class)) { + return ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME; } else { assert clazz.equals(Netty4ModulePlugin.class); return Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME; @@ -92,7 +95,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class); + return Arrays.asList(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class); } @Override diff --git 
a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java index 78398e10b9ce8..1a806b033eb8a 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java @@ -26,6 +26,8 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.NioTransportPlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.StringContains.containsString; @@ -42,7 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ShiroIdentityPlugin.class); + return Arrays.asList(OpenSearchTestCase.getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class, ReactorNetty4Plugin.class, ShiroIdentityPlugin.class); } diff --git a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml index a08090100989a..4fabd038cf915 100644 --- a/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml +++ b/qa/wildfly/src/main/webapp/WEB-INF/jboss-deployment-structure.xml @@ -3,5 +3,8 @@ + + + diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index 0c173523fa7cd..7d0c1e2260de1 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -748,8 +748,9 @@ public void sendResponse(RestResponse response) { // over so we need to populate those **before** that, if possible. 
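// Note: only the header/status preparation below is guarded by the subscription check; the response itself
// is now subscribed unconditionally, so it is delivered even when the body consumer never subscribed.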
if (subscribed.get() == false) { prepareResponse(response.status(), Map.of("Content-Type", List.of(response.contentType()))); - Mono.ignoreElements(this).then(Mono.just(response)).subscribe(delegate::sendResponse); } + + Mono.ignoreElements(this).then(Mono.just(response)).subscribe(delegate::sendResponse); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java index ce6e32a7824c9..a38244fe9ff20 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestBulkStreamingAction.java @@ -8,6 +8,7 @@ package org.opensearch.rest.action.document; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.bulk.BulkItemResponse; @@ -26,6 +27,7 @@ import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.http.HttpChunk; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; @@ -37,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; +import java.util.stream.Stream; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -57,6 +60,7 @@ * * @opensearch.api */ +@ExperimentalApi public class RestBulkStreamingAction extends BaseRestHandler { private static final BulkResponse EMPTY = new BulkResponse(new BulkItemResponse[0], 0L); private final boolean allowExplicitIndex; @@ -95,6 +99,18 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final StreamingRestChannelConsumer consumer = (channel) -> { final MediaType mediaType = request.getMediaType(); + // We prepare (and, more importantly, validate) the templated BulkRequest instance up front: if the parameters + // are invalid, we fail the request immediately instead of producing a possibly large number + // of failed chunks.
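+            // The values validated here act as a template: each per-chunk BulkRequest created below copies
+            // waitForActiveShards, timeout and the refresh policy from this prepared instance.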
+ FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + BulkRequest prepareBulkRequest = Requests.bulkRequest(); + if (waitForActiveShards != null) { + prepareBulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); + } + + prepareBulkRequest.timeout(timeout); + prepareBulkRequest.setRefreshPolicy(refresh); + // Set the content type and the status code before sending the response stream over channel.prepareResponse(RestStatus.OK, Map.of("Content-Type", List.of(mediaType.mediaTypeWithoutParameters()))); @@ -105,17 +121,17 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC // TODOs: // - add batching (by interval and/or count) // - eliminate serialization inefficiencies - Flux.from(channel).map(chunk -> { - FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + Flux.from(channel).zipWith(Flux.fromStream(Stream.generate(() -> { BulkRequest bulkRequest = Requests.bulkRequest(); - if (waitForActiveShards != null) { - bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); - } - - bulkRequest.timeout(timeout); - bulkRequest.setRefreshPolicy(refresh); - - try { + bulkRequest.waitForActiveShards(prepareBulkRequest.waitForActiveShards()); + bulkRequest.timeout(prepareBulkRequest.timeout()); + bulkRequest.setRefreshPolicy(prepareBulkRequest.getRefreshPolicy()); + return bulkRequest; + }))).map(t -> { + final HttpChunk chunk = t.getT1(); + final BulkRequest bulkRequest = t.getT2(); + + try (chunk) { bulkRequest.add( chunk.content(), defaultIndex, @@ -168,7 +184,17 @@ public void onFailure(Exception ex) { } catch (IOException ex) { throw new UncheckedIOException(ex); } - })).subscribe(); + })).onErrorComplete(ex -> { + if (ex instanceof Error) { + return false; + } + try { + channel.sendResponse(new BytesRestResponse(channel, (Exception) ex)); + return true; + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + }).subscribe(); }; return channel -> { From 47078850355562c5cf7ab3540866b4958ec196be Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Wed, 31 Jul 2024 23:58:30 +0530 Subject: [PATCH 38/68] Caching number of primary shards per node for evaluating constraints on avg primary shards across all indices per node (#14992) Signed-off-by: RS146BIJAY --- .../allocator/BalancedShardsAllocator.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index ae173bbf06c4f..212583d1fb14f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -476,6 +476,7 @@ void updateRebalanceConstraint(String constraint, boolean add) { public static class ModelNode implements Iterable { private final Map indices = new HashMap<>(); private int numShards = 0; + private int numPrimaryShards = 0; private final RoutingNode routingNode; ModelNode(RoutingNode routingNode) { @@ -509,7 +510,7 @@ public int numPrimaryShards(String idx) { } public int numPrimaryShards() { - return indices.values().stream().mapToInt(index -> index.numPrimaryShards()).sum(); + return numPrimaryShards; } public int highestPrimary(String index) { @@ -527,6 
+528,10 @@ public void addShard(ShardRouting shard) { indices.put(index.getIndexId(), index); } index.addShard(shard); + if (shard.primary()) { + numPrimaryShards++; + } + numShards++; } @@ -538,6 +543,11 @@ public void removeShard(ShardRouting shard) { indices.remove(shard.getIndexName()); } } + + if (shard.primary()) { + numPrimaryShards--; + } + numShards--; } From e7ee950992911739eb1b079491731073ddd52e4f Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 31 Jul 2024 14:34:54 -0400 Subject: [PATCH 39/68] Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin (#15039) * Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * Use ThreadContextAccess Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../client/OriginSettingClient.java | 7 ++++++- .../client/support/AbstractClient.java | 5 ++++- .../common/util/concurrent/ThreadContext.java | 10 ++++++++++ .../org/opensearch/bootstrap/security.policy | 1 + .../bootstrap/test-framework.policy | 2 ++ .../util/concurrent/ThreadContextTests.java | 20 +++++++++++++++---- 7 files changed, 40 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f63c7c5524d86..c1846bd5e7cfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) +- Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) ### Dependencies - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) diff --git a/server/src/main/java/org/opensearch/client/OriginSettingClient.java b/server/src/main/java/org/opensearch/client/OriginSettingClient.java index 1b0e08cc489c4..27d87227df7bc 100644 --- a/server/src/main/java/org/opensearch/client/OriginSettingClient.java +++ b/server/src/main/java/org/opensearch/client/OriginSettingClient.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; @@ -65,7 +66,11 @@ protected void ActionListener listener ) { final Supplier supplier = in().threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashWithOrigin(origin)) { + try ( + ThreadContext.StoredContext ignore = ThreadContextAccess.doPrivileged( + () -> in().threadPool().getThreadContext().stashWithOrigin(origin) + ) + ) { super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java 
b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6c6049f04231b..509cd732357d6 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -416,6 +416,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.bytes.BytesReference; @@ -2148,7 +2149,9 @@ protected void ActionListener listener ) { ThreadContext threadContext = threadPool().getThreadContext(); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(headers)) { + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged(() -> threadContext.stashAndMergeHeaders(headers)) + ) { super.doExecute(action, request, listener); } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index b955934c4f547..3e02a26aab488 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -116,6 +116,8 @@ public final class ThreadContext implements Writeable { // thread context permissions private static final Permission ACCESS_SYSTEM_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("markAsSystemContext"); + private static final Permission STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashAndMergeHeaders"); + private static final Permission STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION = new ThreadContextPermission("stashWithOrigin"); private static final Logger logger = LogManager.getLogger(ThreadContext.class); private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); @@ -213,6 +215,10 @@ public Writeable captureAsWriteable() { * if it can't find the task in memory. */ public StoredContext stashWithOrigin(String origin) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_WITH_ORIGIN_THREAD_CONTEXT_PERMISSION); + } final ThreadContext.StoredContext storedContext = stashContext(); putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); return storedContext; @@ -224,6 +230,10 @@ public StoredContext stashWithOrigin(String origin) { * that are already existing are preserved unless they are defaults. 
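 *
 * A minimal usage sketch (the header name and value are arbitrary, shown for illustration only):
 *
 *   Map defaults = Map.of("X-Opaque-Id", "my-task");
 *   try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(defaults)) {
 *       // headers visible here are the merged view; the previous context is restored on close
 *   }
 *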
*/ public StoredContext stashAndMergeHeaders(Map headers) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(STASH_AND_MERGE_THREAD_CONTEXT_PERMISSION); + } final ThreadContextStruct context = threadLocal.get(); Map newHeader = new HashMap<>(headers); newHeader.putAll(context.requestHeaders); diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index b7aaa2e3eec48..22e445f7d9022 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -49,6 +49,7 @@ grant codeBase "${codebase.opensearch}" { // needed for SPI class loading permission java.lang.RuntimePermission "accessDeclaredMembers"; permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; + permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; }; //// Very special jar permissions: diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index f674c90c45a0e..19f8adbe003ca 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -158,4 +158,6 @@ grant { permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; + permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; + permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; }; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 4c7cd4513412d..5992ffa1465b4 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -206,7 +206,7 @@ public void testStashWithOrigin() { } assertNull(threadContext.getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); - try (ThreadContext.StoredContext storedContext = threadContext.stashWithOrigin(origin)) { + try (ThreadContext.StoredContext storedContext = ThreadContextAccess.doPrivileged(() -> threadContext.stashWithOrigin(origin))) { assertEquals(origin, threadContext.getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); assertNull(threadContext.getTransient("foo")); assertNull(threadContext.getTransient("bar")); @@ -231,7 +231,7 @@ public void testStashAndMerge() { HashMap toMerge = new HashMap<>(); toMerge.put("foo", "baz"); toMerge.put("simon", "says"); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(toMerge)) { + try (ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged(() -> threadContext.stashAndMergeHeaders(toMerge))) { assertEquals("bar", threadContext.getHeader("foo")); assertEquals("says", threadContext.getHeader("simon")); assertNull(threadContext.getTransient("ctx.foo")); @@ -493,7 +493,13 @@ public void testStashAndMergeWithModifiedDefaults() { ThreadContext threadContext = new ThreadContext(build); HashMap toMerge = new HashMap<>(); toMerge.put("default", "2"); - try (ThreadContext.StoredContext ctx = 
threadContext.stashAndMergeHeaders(toMerge)) { + ThreadContext finalThreadContext1 = threadContext; + HashMap finalToMerge1 = toMerge; + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged( + () -> finalThreadContext1.stashAndMergeHeaders(finalToMerge1) + ) + ) { assertEquals("2", threadContext.getHeader("default")); } @@ -502,7 +508,13 @@ public void testStashAndMergeWithModifiedDefaults() { threadContext.putHeader("default", "4"); toMerge = new HashMap<>(); toMerge.put("default", "2"); - try (ThreadContext.StoredContext ctx = threadContext.stashAndMergeHeaders(toMerge)) { + ThreadContext finalThreadContext2 = threadContext; + HashMap finalToMerge2 = toMerge; + try ( + ThreadContext.StoredContext ctx = ThreadContextAccess.doPrivileged( + () -> finalThreadContext2.stashAndMergeHeaders(finalToMerge2) + ) + ) { assertEquals("4", threadContext.getHeader("default")); } } From 0324edda286a2ab8a795d7f86b3402d229cd2255 Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Wed, 31 Jul 2024 12:09:54 -0700 Subject: [PATCH 40/68] Route search traffic to _primary_first for warm index (#14934) Signed-off-by: Neetika Singhal --- .../cluster/routing/OperationRouting.java | 9 +++ .../routing/OperationRoutingTests.java | 64 +++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 6158461c7d4e9..6242247f34a93 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -42,8 +42,10 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; @@ -245,6 +247,13 @@ public GroupShardsIterator searchShards( preference = Preference.PRIMARY.type(); } + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX) + && IndexModule.DataLocalityType.PARTIAL.name() + .equals(indexMetadataForShard.getSettings().get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey())) + && (preference == null || preference.isEmpty())) { + preference = Preference.PRIMARY_FIRST.type(); + } + ShardIterator iterator = preferenceActiveShardIterator( shard, clusterState.nodes().getLocalNodeId(), diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index 4f3e50eebb9c6..ad8b48d56c417 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -41,9 +41,11 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import 
org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -1054,6 +1056,68 @@ public void testSearchableSnapshotPrimaryDefault() throws Exception { } } + @SuppressForbidden(reason = "feature flag overrides") + public void testPartialIndexPrimaryDefault() throws Exception { + System.setProperty(FeatureFlags.TIERED_REMOTE_INDEX, "true"); + final int numIndices = 1; + final int numShards = 2; + final int numReplicas = 2; + final String[] indexNames = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indexNames[i] = "test" + i; + } + // The first index is a partial index + final String indexName = indexNames[0]; + ClusterService clusterService = null; + ThreadPool threadPool = null; + + try { + OperationRouting opRouting = new OperationRouting( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + ClusterState state = ClusterStateCreationUtils.stateWithAssignedPrimariesAndReplicas(indexNames, numShards, numReplicas); + threadPool = new TestThreadPool("testPartialIndexPrimaryDefault"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + + // Update the index config within the cluster state to modify the index to a partial index + IndexMetadata partialIndexMetadata = IndexMetadata.builder(indexName) + .settings( + Settings.builder() + .put(state.metadata().index(indexName).getSettings()) + .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL) + .build() + ) + .build(); + Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()) + .put(partialIndexMetadata, false) + .generateClusterUuidIfNeeded(); + state = ClusterState.builder(state).metadata(metadataBuilder.build()).build(); + + // Verify default preference is primary only + GroupShardsIterator groupIterator = opRouting.searchShards(state, indexNames, null, null); + assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); + + for (ShardIterator shardIterator : groupIterator) { + assertTrue("Only primary should exist with no preference", shardIterator.nextOrNull().primary()); + } + + // Verify alternative preference can be applied to a partial index + groupIterator = opRouting.searchShards(state, indexNames, null, "_replica"); + assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); + + for (ShardIterator shardIterator : groupIterator) { + assertThat("Replica shards will be returned", shardIterator.size(), equalTo(numReplicas)); + assertFalse("Returned shard should be a replica", shardIterator.nextOrNull().primary()); + } + } finally { + IOUtils.close(clusterService); + terminate(threadPool); + System.setProperty(FeatureFlags.TIERED_REMOTE_INDEX, "false"); + } + } + private DiscoveryNode[] setupNodes() { // Sets up two data nodes in zone-a and one data node in zone-b List zones = Arrays.asList("a", "a", "b"); From 67a2e4c7275afa93ce2c6fc2107ca0f7a8c461bd Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 31 Jul 2024 20:23:30 -0400 Subject: [PATCH 41/68] Add javadoc about ThreadContextPermission for stashWithOrigin and stashAndMergeHeaders (#15051) Signed-off-by: Craig Perkins --- .../common/util/concurrent/ThreadContext.java | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 3e02a26aab488..070e18481f2a3 100644 --- 
a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -213,6 +213,13 @@ public Writeable captureAsWriteable() { * For example, a user might not have permission to GET from the tasks index * but the tasks API will perform a get on their behalf using this method * if it can't find the task in memory. + * + * Usage of stashWithOrigin is guarded by a ThreadContextPermission. In order to use + * stashWithOrigin, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; */ public StoredContext stashWithOrigin(String origin) { SecurityManager sm = System.getSecurityManager(); @@ -228,6 +235,13 @@ public StoredContext stashWithOrigin(String origin) { * Removes the current context and resets a new context that contains a merge of the current headers and the given headers. * The removed context can be restored when closing the returned {@link StoredContext}. The merge strategy is that headers * that are already existing are preserved unless they are defaults. + * + * Usage of stashAndMergeHeaders is guarded by a ThreadContextPermission. In order to use + * stashAndMergeHeaders, the codebase needs to explicitly be granted permission in the JSM policy file. + * + * Add an entry in the grant portion of the policy file like this: + * + * permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; */ public StoredContext stashAndMergeHeaders(Map headers) { SecurityManager sm = System.getSecurityManager(); From d4e7766a90f45fc54ecd5658a5fae472ed9b7030 Mon Sep 17 00:00:00 2001 From: bowenlan-amzn Date: Fri, 2 Aug 2024 04:51:09 -0700 Subject: [PATCH 42/68] Add 2.17.0 in main branch (#15053) Signed-off-by: bowenlan-amzn --- .ci/bwcVersions | 1 + libs/core/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index a738eb54e17f6..771bfe694b698 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -36,3 +36,4 @@ BWC_VERSION: - "2.15.0" - "2.15.1" - "2.16.0" + - "2.17.0" diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index b647a92d6708a..c2d8ce9be29dd 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -107,6 +107,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_15_1 = new Version(2150199, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_1); + public static final Version V_2_17_0 = new Version(2170099, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static final Version CURRENT = V_3_0_0; From 7c471a0f02bfded2e987608f45254bbce1bd4734 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 2 Aug 2024 13:34:16 -0400 Subject: [PATCH 43/68] Add MacOS aarch64 to precommit since we rolled out the support for such distribution (#15082) Signed-off-by: Andriy Redko --- .github/workflows/precommit.yml | 2 +- 1 file changed, 1 
From d4e7766a90f45fc54ecd5658a5fae472ed9b7030 Mon Sep 17 00:00:00 2001
From: bowenlan-amzn
Date: Fri, 2 Aug 2024 04:51:09 -0700
Subject: [PATCH 42/68] Add 2.17.0 in main branch (#15053)

Signed-off-by: bowenlan-amzn
---
 .ci/bwcVersions                                     | 1 +
 libs/core/src/main/java/org/opensearch/Version.java | 1 +
 2 files changed, 2 insertions(+)

diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index a738eb54e17f6..771bfe694b698 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -36,3 +36,4 @@ BWC_VERSION:
   - "2.15.0"
   - "2.15.1"
   - "2.16.0"
+  - "2.17.0"
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index b647a92d6708a..c2d8ce9be29dd 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -107,6 +107,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0);
     public static final Version V_2_15_1 = new Version(2150199, org.apache.lucene.util.Version.LUCENE_9_10_0);
     public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_1);
+    public static final Version V_2_17_0 = new Version(2170099, org.apache.lucene.util.Version.LUCENE_9_11_1);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0);
     public static final Version CURRENT = V_3_0_0;

From 7c471a0f02bfded2e987608f45254bbce1bd4734 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 2 Aug 2024 13:34:16 -0400
Subject: [PATCH 43/68] Add MacOS aarch64 to precommit since we rolled out the
 support for such distribution (#15082)

Signed-off-by: Andriy Redko
---
 .github/workflows/precommit.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml
index 95ca49ac9cb43..793fdae5df4da 100644
--- a/.github/workflows/precommit.yml
+++ b/.github/workflows/precommit.yml
@@ -8,7 +8,7 @@ jobs:
     strategy:
       matrix:
         java: [ 11, 17, 21 ]
-        os: [ubuntu-latest, windows-latest, macos-13]
+        os: [ubuntu-latest, windows-latest, macos-latest, macos-13]
     steps:
       - uses: actions/checkout@v4
       - name: Set up JDK ${{ matrix.java }}

From 48634bdc277d44ff3027ca702b0f08b111fcc88a Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 2 Aug 2024 13:35:57 -0400
Subject: [PATCH 44/68] Bump Netty to 4.1.112.Final (#15081)

Signed-off-by: Andriy Redko
---
 CHANGELOG.md | 1 +
 buildSrc/version.properties | 2 +-
 .../licenses/netty-buffer-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-buffer-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-common-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-common-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-handler-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-handler-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-resolver-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-resolver-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-transport-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-transport-4.1.112.Final.jar.sha1 | 1 +
 .../netty-transport-native-unix-common-4.1.111.Final.jar.sha1 | 1 -
 .../netty-transport-native-unix-common-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-dns-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-dns-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-socks-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-socks-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 | 1 +
 .../netty-transport-native-unix-common-4.1.111.Final.jar.sha1 | 1 -
 .../netty-transport-native-unix-common-4.1.112.Final.jar.sha1 | 1 +
 .../repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 | 1 -
 .../repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http2-4.1.111.Final.jar.sha1 | 1 -
 .../licenses/netty-codec-http2-4.1.112.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 | 1 +
 .../repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 | 1 -
 .../repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 | 1 +
 .../licenses/netty-resolver-4.1.111.Final.jar.sha1 | 1 -
.../licenses/netty-resolver-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.112.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.111.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.112.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.111.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.112.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.112.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.111.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.112.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.111.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.112.Final.jar.sha1 | 1 + 90 files changed, 46 insertions(+), 45 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 
modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 
plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 delete mode 100644 
plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c1846bd5e7cfd..c240cf26627cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) ### Dependencies +- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.15.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861)) - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) - Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index eb67af909bccf..08c45ef058716 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -29,7 +29,7 @@ hdrhistogram = 2.2.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.111.Final +netty = 4.1.112.Final joda = 2.12.7 # project reactor diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ 
-0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index 5e3f819012811..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..a42a41b6387c8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +06724b184ee870ecc4d8fc36931beeb3c387b0ee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 deleted file 
mode 100644 index 226ee06d39d6c..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea52ef6617a9b69b0baaebb7f0b80373527f9607 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5291a16c10448 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +9aed7e78c467d06a47a45b5b27466380a6427e2f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 deleted file mode 100644 index dcc2b0c7ca923..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e459c8630bb7c942b79a97e62dd728798de6a8c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..cf50574b87da0 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b23c87a85451b3b0e7c3e8e89698cea6831a8418 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index b22ad6784809b..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..24e8177190e04 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +375872f1c16bb51aac016ff6ee4f5d28b1288d4d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 deleted file mode 100644 index 076124a7d1f89..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fba10bb4911517eb1bdcc05ef392499dda4d5ac \ No newline at end of file diff --git 
a/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..7c36b789e839c --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +d6b2e543749a86957777a46cf68aaa337cc558cb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 deleted file mode 100644 index 97001777eadf5..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b97d32eb1489043e478deea99bd93ce487b82f6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..0196dacfe92ba --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +67e590356eb53c20aaabd67f61ae66f628e62e3d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 0847ac3034db7..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acafc128cddafa021bc0b48b0788eb0e118add5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8dad0e3104dc8 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b50ff619cdcdc48e748cba3405c9988529f28f60 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 deleted file mode 100644 index ac96e7545ed58..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24e97cf14ea9d80afe4c5ab69066b587fccc154a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5fbfde0836e0c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +77cd136dd3843f5e7cbcf68c824975d745c49ddb \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 deleted file mode 100644 index 6784ac6c3b64f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b54863f578939e135d3b3aea610284ae57c188cf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..5c26883046fed --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +bdc12df04bb6858890b8aa108060b5b365a26102 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 deleted file mode 100644 index 3d86194de9213..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6762ec00a6d268f9980741f5b755838bcd658bf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1fd224fdd0b44 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +c87f2ec3d9a97bd2b793d16817abb2bab93a7fc3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index 5e3f819012811..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..a42a41b6387c8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +06724b184ee870ecc4d8fc36931beeb3c387b0ee \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 deleted file mode 100644 index 4ef1adb818300..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6ecbc452321e632bf3cea0f9758839b650455c7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..22d35128c3ad5 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +81af1040bfa977f98dd0e1bd9639513ea862ca04 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 deleted file mode 100644 index 06c86b8fda557..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0cca5df75bfb4f858d0435f601d8b1cae1de054 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..d4767d06b22bf --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +7fa28b510f0f16f4d5d7188b86bef59e048f62f9 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 deleted file mode 100644 index 16cb1cce7f504..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58210befcb31adbcadd5724966a061444db91863 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..47af3100f0f2d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +b2798069092a981a832b7510d0462ee9efb7a80e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 deleted file mode 100644 index 2f70f791f65ed..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..8b30272861770 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +3d5e2d5bcc6baeeb8c13a230980c6132a778e036 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 deleted file mode 100644 index 621cbf58f3133..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3493179999f211dc49714319f81da2be86523a3b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..1a094fa19a623 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +58a631d9d44c4ed7cc0dcc9cffa6641da9374d72 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 deleted file mode 100644 index b22ad6784809b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ac6a3d96935129ba45ea768ad30e31cad0d8c4d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 new file mode 100644 index 0000000000000..24e8177190e04 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.112.Final.jar.sha1 @@ -0,0 +1 @@ +375872f1c16bb51aac016ff6ee4f5d28b1288d4d \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
deleted file mode 100644
index ac96e7545ed58..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-24e97cf14ea9d80afe4c5ab69066b587fccc154a
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1
new file mode 100644
index 0000000000000..5fbfde0836e0c
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.112.Final.jar.sha1
@@ -0,0 +1 @@
+77cd136dd3843f5e7cbcf68c824975d745c49ddb
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
deleted file mode 100644
index 0847ac3034db7..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-acafc128cddafa021bc0b48b0788eb0e118add5e
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1
new file mode 100644
index 0000000000000..8dad0e3104dc8
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.112.Final.jar.sha1
@@ -0,0 +1 @@
+b50ff619cdcdc48e748cba3405c9988529f28f60
\ No newline at end of file

From f829a9f2a59aa2d864c197b58d8b20095d0081fb Mon Sep 17 00:00:00 2001
From: Peter Nied
Date: Fri, 2 Aug 2024 12:38:15 -0500
Subject: [PATCH 45/68] Decommission the Core Triage meeting (#15085)

Resolves: https://github.com/opensearch-project/OpenSearch/issues/14706

Signed-off-by: Peter Nied
---
 TRIAGING.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/TRIAGING.md b/TRIAGING.md
index c7c07a8ce30bd..6791d5944ee6f 100644
--- a/TRIAGING.md
+++ b/TRIAGING.md
@@ -1,6 +1,6 @@
-The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, Cluster Manager, and finally "Core" as a catch-all for all other issues. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation.
+The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, and Cluster Manager. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation.
 
 ### Do I need to attend for my issue to be addressed/triaged?
 
@@ -14,7 +14,7 @@ Each meeting we seek to address all new issues. However, should we run out of time
 
 ### How do I join a Triage meeting?
 
-  Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), [Indexing](https://www.meetup.com/opensearch/events/301734024/), and [Core](https://www.meetup.com/opensearch/events/301061009/).
+  Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), and [Indexing](https://www.meetup.com/opensearch/events/301734024/).
 
 After joining the virtual meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.
From bd226c215000866ea83f5aa872d7771d26effece Mon Sep 17 00:00:00 2001
From: kkewwei
Date: Sat, 3 Aug 2024 02:51:55 +0800
Subject: [PATCH 46/68] support rangeQuery and regexpQuery in constant_keyword
 field type (#14711)

---------

Signed-off-by: kkewwei
---
 CHANGELOG.md | 1 +
 .../test/index/110_constant_keyword.yml | 282 +++++++++++++++++-
 .../index/mapper/ConstantFieldType.java | 2 +-
 .../mapper/ConstantKeywordFieldMapper.java | 66 ++++
 .../mapper/ConstantKeywordFieldTypeTests.java | 54 ++++
 5 files changed, 394 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c240cf26627cd..dfc330cfdaed2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618))
 - Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016))
 - Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039))
+- Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711))
 
 ### Dependencies
 - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml
index f4f8b3752bec8..1c50187534026 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml
@@ -1,17 +1,13 @@
+# The test setup includes two parts:
+# part1: test mapping and indexing
+# part2: test query
 ---
-# The test setup includes:
-# - Create index with constant_keyword field type
-# - Check mapping
-# - Index two example documents
-# - Search
-# - Delete Index when connection is teardown
-
-"Mappings and Supported queries":
+"Mappings and Indexing":
   - skip:
       version: " - 2.15.99"
       reason: "fixed in 2.16.0"
 
-  # Create index with constant_keyword field type
+  # Create indices with constant_keyword field type
   - do:
       indices.create:
         index: test
         body:
           mappings:
             properties:
               genre:
                 type: "constant_keyword"
                 value: "1"
 
-  # Index document
+  # Index documents to test integer and string are both ok.
   - do:
       index:
         index: test
        id: 1
        body: {
          "genre": "1"
        }
@@ -39,6 +35,7 @@
         "genre": 1
       }
 
+  # Refresh
   - do:
       indices.refresh:
         index: test
@@ -54,6 +51,7 @@
   # Verify Document Count
   - do:
       search:
+        index: test
         body: {
           query: {
             match_all: {}
@@ -68,3 +66,267 @@
   - do:
     indices.delete:
       index: test
+
+---
+"Queries":
+  - skip:
+      version: " - 2.99.99"
+      reason: "rangeQuery and regexpQuery are supported in 3.0.0 in main branch"
+
+  - do:
+      indices.create:
+        index: test1
+        body:
+          mappings:
+            properties:
+              genre:
+                type: "constant_keyword"
+                value: "d3efault"
+
+  # Index documents to test query.
+  - do:
+      index:
+        index: test1
+        id: 1
+        body: {
+          "genre": "d3efault"
+        }
+
+  # Refresh
+  - do:
+      indices.refresh:
+        index: test1
+
+  # Test rangeQuery
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                gte: "d3efault"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                from: "d3efault",
+                "include_lower": "false"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 0 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                lte: "d3efault"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                to: "d3efault",
+                include_upper: "false"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 0 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                from: "d3efault",
+                to: "d3efault",
+                include_lower: "false",
+                include_upper: "true"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 0 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                from: "d3efault",
+                to: "d3efault",
+                include_lower: "true",
+                include_upper: "false"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 0 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                from: null,
+                to: null
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                from: "d3efault",
+                to: "d3efault",
+                include_lower: "true",
+                include_upper: "true"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            range: {
+              genre: {
+                from: "d3efaul",
+                to: "d3efault1",
+                include_lower: "true",
+                include_upper: "true"
+              }
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  # Test regexpQuery
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            regexp: {
+              "genre":"d.*"
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            regexp: {
+              "genre":"d\\defau[a-z]?t"
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            regexp: {
+              "genre":"d\\defa[a-z]?t"
+            }
+          }
+        }
+
+  - length: { hits.hits: 0 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            regexp: {
+              "genre":"d3efa[a-z]{3,3}"
+            }
+          }
+        }
+
+  - length: { hits.hits: 1 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            regexp: {
+              "genre":"d3efa[a-z]{4,4}"
+            }
+          }
+        }
+
+  - length: { hits.hits: 0 }
+
+  - do:
+      search:
+        index: test1
+        body: {
+          query: {
+            match_all: {}
+          }
+        }
+
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0._source.genre: "d3efault" }
+
+  # Delete Index when connection is teardown
+  - do:
+    indices.delete:
+      index: test1
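For readers following along outside the YAML harness, the same queries can be expressed with the regular query-builder API. A small illustrative fragment, not part of the patch ("genre" and the patterns come from the tests above; QueryBuilders is the real org.opensearch.index.query entry point):

    import org.opensearch.index.query.QueryBuilder;
    import org.opensearch.index.query.QueryBuilders;

    final class ConstantKeywordQuerySketch {
        // Equivalent of the gte: "d3efault" YAML case above (expects 1 hit).
        static QueryBuilder rangeHit() {
            return QueryBuilders.rangeQuery("genre").gte("d3efault");
        }

        // Equivalent of the "d3efa[a-z]{3,3}" regexp case above ("ult" supplies the three letters).
        static QueryBuilder regexpHit() {
            return QueryBuilders.regexpQuery("genre", "d3efa[a-z]{3,3}");
        }
    }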
a/server/src/main/java/org/opensearch/index/mapper/ConstantFieldType.java b/server/src/main/java/org/opensearch/index/mapper/ConstantFieldType.java index a28a6369b1aa4..cc581651e5295 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ConstantFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/ConstantFieldType.java @@ -76,7 +76,7 @@ public final boolean isAggregatable() { */ protected abstract boolean matches(String pattern, boolean caseInsensitive, QueryShardContext context); - private static String valueToString(Object value) { + static String valueToString(Object value) { return value instanceof BytesRef ? ((BytesRef) value).utf8ToString() : value.toString(); } diff --git a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java index 2edd817f61f61..02c2214c18e72 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java @@ -9,10 +9,21 @@ package org.opensearch.index.mapper; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.ByteRunAutomaton; +import org.apache.lucene.util.automaton.RegExp; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.regex.Regex; +import org.opensearch.common.time.DateMathParser; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.ConstantIndexFieldData; import org.opensearch.index.query.QueryShardContext; @@ -20,6 +31,7 @@ import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; +import java.time.ZoneId; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -122,6 +134,60 @@ public Query existsQuery(QueryShardContext context) { return new MatchAllDocsQuery(); } + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + ShapeRelation relation, + ZoneId timeZone, + DateMathParser parser, + QueryShardContext context + ) { + if (lowerTerm != null) { + lowerTerm = valueToString(lowerTerm); + } + if (upperTerm != null) { + upperTerm = valueToString(upperTerm); + } + + if (lowerTerm != null && upperTerm != null && ((String) lowerTerm).compareTo((String) upperTerm) > 0) { + return new MatchNoDocsQuery(); + } + + if (lowerTerm != null && ((String) lowerTerm).compareTo(value) > (includeLower ? 0 : -1)) { + return new MatchNoDocsQuery(); + } + + if (upperTerm != null && ((String) upperTerm).compareTo(value) < (includeUpper ? 
0 : 1)) { + return new MatchNoDocsQuery(); + } + return new MatchAllDocsQuery(); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Automaton automaton = new RegExp(value, syntaxFlags, matchFlags).toAutomaton( + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates + ); + ByteRunAutomaton byteRunAutomaton = new ByteRunAutomaton(automaton); + BytesRef valueBytes = BytesRefs.toBytesRef(this.value); + if (byteRunAutomaton.run(valueBytes.bytes, valueBytes.offset, valueBytes.length)) { + return new MatchAllDocsQuery(); + } else { + return new MatchNoDocsQuery(); + } + } + @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { return new ConstantIndexFieldData.Builder(fullyQualifiedIndexName, name(), CoreValuesSourceType.BYTES); diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java index 235811539a299..266d79fb8e8b8 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java @@ -10,6 +10,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.regex.Regex; @@ -61,6 +63,58 @@ public void testExistsQuery() { assertEquals(new MatchAllDocsQuery(), ft.existsQuery(createContext())); } + public void testRangeQuery() { + Query actual = ft.rangeQuery("default", null, true, false, null, null, null, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), actual); + + actual = ft.rangeQuery("default", null, false, false, null, null, null, MOCK_QSC); + assertEquals(new MatchNoDocsQuery(), actual); + + actual = ft.rangeQuery(null, "default", true, true, null, null, null, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), actual); + + actual = ft.rangeQuery(null, "default", false, false, null, null, null, MOCK_QSC); + assertEquals(new MatchNoDocsQuery(), actual); + + actual = ft.rangeQuery("default", "default", false, true, null, null, null, MOCK_QSC); + assertEquals(new MatchNoDocsQuery(), actual); + + actual = ft.rangeQuery("default", "default", true, false, null, null, null, MOCK_QSC); + assertEquals(new MatchNoDocsQuery(), actual); + + actual = ft.rangeQuery(null, null, false, false, null, null, null, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), actual); + + actual = ft.rangeQuery("default", "default", true, true, null, null, null, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), actual); + + actual = ft.rangeQuery("defaul", "default1", true, true, null, null, null, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), actual); + } + + public void testRegexpQuery() { + final ConstantKeywordFieldMapper.ConstantKeywordFieldType ft = new ConstantKeywordFieldMapper.ConstantKeywordFieldType( + "field", + "d3efault" + ); + // test .* + Query query = ft.regexpQuery("d.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), query); + // test \d and ? 
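+        // Illustrative note (not part of the original patch): regexpQuery on a constant_keyword
+        // field never consults the index. As the mapper change above shows, it compiles the
+        // pattern into a ByteRunAutomaton and runs the single constant value through it, so the
+        // query rewrites to MatchAllDocsQuery on a match and MatchNoDocsQuery otherwise.
+        // "d\defau[a-z]?t" matches the constant "d3efault": \d consumes the digit '3' and
+        // [a-z]? consumes the 'l'.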
+ query = ft.regexpQuery("d\\defau[a-z]?t", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), query); + + // test \d and ? + query = ft.regexpQuery("d\\defa[a-z]?t", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC); + assertEquals(new MatchNoDocsQuery(), query); + // \w{m,n} + query = ft.regexpQuery("d3efa[a-z]{3,3}", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC); + assertEquals(new MatchAllDocsQuery(), query); + // \w{m,n} + query = ft.regexpQuery("d3efa[a-z]{4,4}", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC); + assertEquals(new MatchNoDocsQuery(), query); + } + private QueryShardContext createContext() { IndexMetadata indexMetadata = IndexMetadata.builder("index") .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) From a785073e5e7925ef8e5605427cae943822100f4a Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Fri, 2 Aug 2024 12:57:00 -0700 Subject: [PATCH 47/68] Support scripting for composite aggs in concurrent segment search (#15072) Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + modules/lang-painless/build.gradle | 1 + .../opensearch/painless/SimplePainlessIT.java | 231 ++++++++++++++++++ .../CompositeAggregationFactory.java | 4 +- .../search/lookup/SearchLookup.java | 13 +- .../search/lookup/SourceLookup.java | 2 +- 6 files changed, 247 insertions(+), 5 deletions(-) create mode 100644 modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index dfc330cfdaed2..708c9831236b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - Add ThreadContextPermission for markAsSystemContext and allow core to perform the method ([#15016](https://github.com/opensearch-project/OpenSearch/pull/15016)) - Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) +- [Concurrent Segment Search] Support composite aggregations with scripting ([#15072](https://github.com/opensearch-project/OpenSearch/pull/15072)) - Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) ### Dependencies diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 7b828109139c8..7075901979e3b 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,6 +33,7 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' diff --git a/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java new file mode 100644 index 0000000000000..df327bf4871c6 --- /dev/null +++ b/modules/lang-painless/src/internalClusterTest/java/org/opensearch/painless/SimplePainlessIT.java @@ -0,0 +1,231 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made 
to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.painless; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.plugins.Plugin; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.bucket.composite.InternalComposite; +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.terms.Terms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SimplePainlessIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimplePainlessIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(PainlessModulePlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), "4") + .build(); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() + .startObject() + .field("dynamic", "false") + .startObject("_meta") + .field("schema_version", 5) + .endObject() + .startObject("properties") + .startObject("entity") + .field("type", "nested") + .endObject() + .endObject() + .endObject(); + + assertAcked( + prepareCreate("test").setMapping(xContentBuilder) + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + assertAcked( + prepareCreate("test-df").setSettings( + 
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + client().prepareIndex("test") + .setId("a") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"1.2.3.4\"},{\"name\":\"keyword-field\",\"value\":\"field-1\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test") + .setId("b") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"5.6.7.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test") + .setId("c") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"1.6.3.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test") + .setId("d") + .setSource( + "{\"entity\":[{\"name\":\"ip-field\",\"value\":\"2.6.4.8\"},{\"name\":\"keyword-field\",\"value\":\"field-2\"}]}", + MediaTypeRegistry.JSON + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureSearchable("test"); + + client().prepareIndex("test-df") + .setId("a") + .setSource("{\"field\":\"value1\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test-df") + .setId("b") + .setSource("{\"field\":\"value2\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test-df") + .setId("c") + .setSource("{\"field\":\"value3\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + client().prepareIndex("test-df") + .setId("d") + .setSource("{\"field\":\"value1\"}", MediaTypeRegistry.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureSearchable("test"); + } + + public void testTermsValuesSource() throws Exception { + AggregationBuilder agg = AggregationBuilders.composite( + "multi_buckets", + Collections.singletonList( + new TermsValuesSourceBuilder("keyword-field").script( + new Script( + ScriptType.INLINE, + "painless", + "String value = null; if (params == null || params._source == null || params._source.entity == null) { return \"\"; } for (item in params._source.entity) { if (item[\"name\"] == \"keyword-field\") { value = item['value']; break; } } return value;", + Collections.emptyMap() + ) + ) + ) + ); + SearchResponse response = client().prepareSearch("test").setQuery(matchAllQuery()).addAggregation(agg).get(); + + assertSearchResponse(response); + assertEquals(2, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().size()); + assertEquals( + "field-1", + ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(0).getKey().get("keyword-field") + ); + assertEquals(1, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(0).getDocCount()); + assertEquals( + "field-2", + ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(1).getKey().get("keyword-field") + ); + assertEquals(3, ((InternalComposite) response.getAggregations().get("multi_buckets")).getBuckets().get(1).getDocCount()); + } + + public void testSimpleDerivedFieldsQuery() { + assumeFalse( + "Derived fields do not support concurrent search 
https://github.com/opensearch-project/OpenSearch/issues/15007", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + SearchRequest searchRequest = new SearchRequest("test-df").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .query(new TermsQueryBuilder("result", "value1")) + ); + SearchResponse response = client().search(searchRequest).actionGet(); + assertSearchResponse(response); + assertEquals(2, Objects.requireNonNull(response.getHits().getTotalHits()).value); + } + + public void testSimpleDerivedFieldsAgg() { + assumeFalse( + "Derived fields do not support concurrent search https://github.com/opensearch-project/OpenSearch/issues/15007", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + SearchRequest searchRequest = new SearchRequest("test-df").source( + SearchSourceBuilder.searchSource() + .derivedField("result", "keyword", new Script("emit(params._source[\"field\"])")) + .fetchField("result") + .aggregation(new TermsAggregationBuilder("derived-agg").field("result")) + ); + SearchResponse response = client().search(searchRequest).actionGet(); + assertSearchResponse(response); + Terms aggResponse = response.getAggregations().get("derived-agg"); + assertEquals(3, aggResponse.getBuckets().size()); + Terms.Bucket bucket = aggResponse.getBuckets().get(0); + assertEquals("value1", bucket.getKey()); + assertEquals(2, bucket.getDocCount()); + bucket = aggResponse.getBuckets().get(1); + assertEquals("value2", bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + bucket = aggResponse.getBuckets().get(2); + assertEquals("value3", bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 6c5619a843fae..2ff79fb623def 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -40,7 +40,6 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Arrays; import java.util.Map; /** @@ -81,7 +80,6 @@ protected Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - // Disable concurrent search if any scripting is used. 
See https://github.com/opensearch-project/OpenSearch/issues/12331 for details - return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript); + return true; } }
diff --git a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java index 906616eb9ba5f..dff8fae1a9ad1 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java @@ -153,14 +153,25 @@ public final SearchLookup forkAndTrackFieldReferences(String field) { return new SearchLookup(this, newFieldChain); } + /** + * SourceLookup is not thread safe, so we create a new instance for each leaf to support concurrent segment search + */ public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { - return new LeafSearchLookup(context, docMap.getLeafDocLookup(context), sourceLookup, fieldsLookup.getLeafFieldsLookup(context)); + return new LeafSearchLookup( + context, + docMap.getLeafDocLookup(context), + new SourceLookup(), + fieldsLookup.getLeafFieldsLookup(context) + ); } public DocLookup doc() { return docMap; } + /** + * Returned SourceLookup will be unrelated to any created LeafSearchLookups. Instead, use {@link LeafSearchLookup#source()} to access the related {@link SourceLookup}. + */ public SourceLookup source() { return sourceLookup; }
diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java index cbac29fde7932..4644bcb3d9b92 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java @@ -57,7 +57,7 @@ import static java.util.Collections.emptyMap; /** - * Orchestrator class for source lookups + * Orchestrator class for source lookups. Not thread safe.
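+ * (Illustrative note, not part of the original patch: an instance carries mutable per-document
+ * state, such as the current segment reader and doc id, which is why
+ * SearchLookup#getLeafSearchLookup above now hands each leaf its own SourceLookup under
+ * concurrent segment search.)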
* * @opensearch.api */ From 47171f8badbc185e79b175a17376bf3f294516e3 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Sat, 3 Aug 2024 14:03:17 +0530 Subject: [PATCH 48/68] Fix RemoteCloneIndex flaky test by using sync FS repo (#15037) Signed-off-by: Gaurav Bafna --- .../action/admin/indices/create/RemoteCloneIndexIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index acbd68fff6dd0..009f5111078de 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -79,7 +79,7 @@ protected boolean forbidPrivateIndexSettings() { @Before public void setup() { - asyncUploadMockFsRepo = true; + asyncUploadMockFsRepo = false; } public void testCreateCloneIndex() { @@ -153,6 +153,7 @@ public void testCreateCloneIndex() { } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/15056") public void testCreateCloneIndexLowPriorityRateLimit() { Version version = VersionUtils.randomIndexCompatibleVersion(random()); int numPrimaryShards = 1; @@ -280,7 +281,7 @@ public void testCreateCloneIndexFailure() throws ExecutionException, Interrupted throw new RuntimeException(e); } finally { setFailRate(REPOSITORY_NAME, 0); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(40)); // clean up client().admin() .cluster() From a9d09aa533c8c949e79c760569d875ab5391bd84 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Sat, 3 Aug 2024 20:38:11 +0800 Subject: [PATCH 49/68] Fix delete index template failed when the index template matches a data stream but is unused (#15080) * Fix delete not-using index template failed when the index pattern matches a data stream Signed-off-by: Gao Binlong * modify change log Signed-off-by: Gao Binlong * Fix version check Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../10_basic.yml | 52 +++++++++++++++++ .../MetadataIndexTemplateService.java | 2 +- .../MetadataIndexTemplateServiceTests.java | 58 +++++++++++++++++++ 4 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 708c9831236b2..97464c7659f75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908)) - Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963)) +- Fix delete index template failed when the index template matches a data stream but is unused ([#15080](https://github.com/opensearch-project/OpenSearch/pull/15080)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml new file mode 100644 index 0000000000000..c90e83ab59859 --- /dev/null +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml @@ -0,0 +1,52 @@ +setup: + - do: + indices.put_index_template: + name: test_template_1 + body: + index_patterns: test-* + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + "priority": 50 + + - do: + indices.put_index_template: + name: test_template_2 + body: + index_patterns: test-* + data_stream: {} + template: + settings: + number_of_shards: 1 + number_of_replicas: 0 + "priority": 51 + +--- +teardown: + - do: + indices.delete_data_stream: + name: test-1 + ignore: 404 + - do: + indices.delete_index_template: + name: test_template_1 + ignore: 404 + - do: + indices.delete_index_template: + name: test_template_2 + ignore: 404 + +--- +"Delete index template which is not used by data stream but index pattern matches": + - skip: + version: " - 2.99.99" + reason: "fixed in 3.0.0" + + - do: + indices.create_data_stream: + name: test-1 + + - do: + indices.delete_index_template: + name: test_template_1 diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 7bc3d279513cd..6b638c9920c27 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -944,7 +944,7 @@ static ClusterState innerRemoveIndexTemplateV2(ClusterState currentState, String static Set dataStreamsUsingTemplate(final ClusterState state, final String templateName) { final ComposableIndexTemplate template = state.metadata().templatesV2().get(templateName); - if (template == null) { + if (template == null || template.getDataStreamTemplate() == null) { return Collections.emptySet(); } final Set dataStreams = state.metadata().dataStreams().keySet(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index f26f45b69d133..cb98c34988cbe 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -560,6 +560,64 @@ public void testRemoveIndexTemplateV2() throws Exception { ClusterState updatedState = MetadataIndexTemplateService.innerRemoveIndexTemplateV2(state, "foo"); assertNull(updatedState.metadata().templatesV2().get("foo")); + + // test remove a template which is not used by a data stream but index patterns can match + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_READ, randomBoolean()) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()) + .put(IndexMetadata.SETTING_PRIORITY, randomIntBetween(0, 100000)) + .build(); + CompressedXContent mappings = new CompressedXContent( + "{\"properties\":{\"" + randomAlphaOfLength(5) + "\":{\"type\":\"keyword\"}}}" + ); + + Map meta = Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4)); + List indexPatterns = List.of("foo*"); + List componentTemplates = randomList(0, 10, () -> randomAlphaOfLength(5)); + ComposableIndexTemplate templateToRemove = new 
ComposableIndexTemplate( + indexPatterns, + new Template(settings, mappings, null), + componentTemplates, + randomBoolean() ? null : randomNonNegativeLong(), + randomBoolean() ? null : randomNonNegativeLong(), + meta, + null + ); + + ClusterState stateWithDS = ClusterState.builder(state) + .metadata( + Metadata.builder(state.metadata()) + .put( + new DataStream( + "foo", + new DataStream.TimestampField("@timestamp"), + Collections.singletonList(new Index(".ds-foo-000001", "uuid2")) + ) + ) + .put( + IndexMetadata.builder(".ds-foo-000001") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid2") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build() + ) + ) + .build() + ) + .build(); + + final ClusterState clusterState = metadataIndexTemplateService.addIndexTemplateV2(stateWithDS, false, "foo", templateToRemove); + assertNotNull(clusterState.metadata().templatesV2().get("foo")); + assertTemplatesEqual(clusterState.metadata().templatesV2().get("foo"), templateToRemove); + + updatedState = MetadataIndexTemplateService.innerRemoveIndexTemplateV2(clusterState, "foo"); + assertNull(updatedState.metadata().templatesV2().get("foo")); } /** From e8c6f0f1b15acdfbeeba84a551a2b42024f8502b Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Sat, 3 Aug 2024 21:14:39 -0400 Subject: [PATCH 50/68] Update README to 2.17.0 (#15099) Signed-off-by: Craig Perkins --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 17af2911b9221..5d4a9a671c013 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.14.1 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.14.1)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.14.1") +[![2.17.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.17.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.17.0") [![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) From b911b6f204a0b7b2acc652f6524026f40eee9bea Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Mon, 5 Aug 2024 21:49:19 +0800 Subject: [PATCH 51/68] Fix version check in yml test for the bug fix of delete index template failed (#15101) Signed-off-by: Gao Binlong --- 
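Note on the version gate below: a skip block makes the runner skip a test on every version in the given range, so tightening the upper bound from 2.99.99 to 2.16.99 lets the test run on the 2.17.0 line while still skipping 2.16.x and earlier. A minimal sketch of the convention (the test name and request are placeholders, not part of this patch):

"Behavior added in 2.17.0":
  - skip:
      version: " - 2.16.99"
      reason: "fixed in 2.17.0"

  - do:
      cluster.health: {}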
.../test/indices.delete_index_template/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml index c90e83ab59859..c8c08a2d088ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_index_template/10_basic.yml @@ -40,8 +40,8 @@ teardown: --- "Delete index template which is not used by data stream but index pattern matches": - skip: - version: " - 2.99.99" - reason: "fixed in 3.0.0" + version: " - 2.16.99" + reason: "fixed in 2.17.0" - do: indices.create_data_stream: From 77750066b4dd5b4b0e5b41429a60c369d87ccbf3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 09:28:11 -0700 Subject: [PATCH 52/68] Bump org.tukaani:xz from 1.9 to 1.10 in /plugins/ingest-attachment (#15110) * Bump org.tukaani:xz from 1.9 to 1.10 in /plugins/ingest-attachment Bumps [org.tukaani:xz](https://github.com/tukaani-project/xz-java) from 1.9 to 1.10. - [Release notes](https://github.com/tukaani-project/xz-java/releases) - [Changelog](https://github.com/tukaani-project/xz-java/blob/master/NEWS.md) - [Commits](https://github.com/tukaani-project/xz-java/compare/v1.9...v1.10) --- updated-dependencies: - dependency-name: org.tukaani:xz dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 | 1 + plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 | 1 - 4 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 97464c7659f75..f4db5c3ecb5cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) - Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) +- Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index d631855013527..81ac52b97cefa 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -66,7 +66,7 @@ dependencies { runtimeOnly "com.optimaize.languagedetector:language-detector:0.6" runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies - api 
'org.tukaani:xz:1.9' + api 'org.tukaani:xz:1.10' api "commons-io:commons-io:${versions.commonsio}" api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 new file mode 100644 index 0000000000000..e3757c19ce5ab --- /dev/null +++ b/plugins/ingest-attachment/licenses/xz-1.10.jar.sha1 @@ -0,0 +1 @@ +1be8166f89e035a56c6bfc67dbc423996fe577e2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 b/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 deleted file mode 100644 index c3e22d167212f..0000000000000 --- a/plugins/ingest-attachment/licenses/xz-1.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ea4bec1a921180164852c65006d928617bd2caf \ No newline at end of file From f0ef14d279c6446abfcb38616771da5e344e0de7 Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Mon, 5 Aug 2024 10:07:04 -0700 Subject: [PATCH 53/68] Fix NODE_SEARCH_CACHE_SIZE_SETTING initialization for TIERED_REMOTE_INDEX_SETTING feature (#15076) Signed-off-by: Neetika Singhal --- .../opensearch/remotestore/WritableWarmIT.java | 15 +++++++++++++-- .../snapshots/SearchableSnapshotIT.java | 12 ++++++------ .../src/main/java/org/opensearch/node/Node.java | 5 ++--- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java index a51bd6b20fff0..88c9ae436e85f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java @@ -20,6 +20,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexModule; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.IndexShard; @@ -65,11 +67,20 @@ protected Settings featureFlagSettings() { return featureSettings.build(); } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize.toString()) + .build(); + } + public void testWritableWarmFeatureFlagDisabled() { Settings clusterSettings = Settings.builder().put(super.nodeSettings(0)).put(FeatureFlags.TIERED_REMOTE_INDEX, false).build(); InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(clusterSettings); - internalTestCluster.startDataOnlyNode(clusterSettings); + internalTestCluster.startDataAndSearchNodes(1); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -94,7 +105,7 @@ public void testWritableWarmFeatureFlagDisabled() { public void testWritableWarmBasic() throws Exception { InternalTestCluster internalTestCluster = internalCluster(); internalTestCluster.startClusterManagerOnlyNode(); - internalTestCluster.startDataOnlyNode(); + internalTestCluster.startDataAndSearchNodes(1); Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 1c199df4d548e..a19bbe49ad340 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -67,7 +67,6 @@ import java.util.stream.StreamSupport; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; -import static org.opensearch.common.util.FeatureFlags.TIERED_REMOTE_INDEX; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; @@ -1019,11 +1018,12 @@ public void testStartSearchNode() throws Exception { internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.SEARCH_ROLE))); // test start node without search role internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.DATA_ROLE))); - // test start non-dedicated search node with TIERED_REMOTE_INDEX feature enabled - internalCluster().startNode( - Settings.builder() - .put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) - .put(TIERED_REMOTE_INDEX, true) + // test start non-dedicated search node, if the user doesn't configure the cache size, it fails + assertThrows( + SettingsException.class, + () -> internalCluster().startNode( + Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE))) + ) ); // test start non-dedicated search node assertThrows( diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 8684b1b383cab..cbed8dfea8cc4 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -382,7 +382,7 @@ public class Node implements Closeable { public static final Setting NODE_SEARCH_CACHE_SIZE_SETTING = new Setting<>( "node.search.cache.size", - s -> (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX_SETTING) || DiscoveryNode.isDedicatedSearchNode(s)) ? "80%" : ZERO, + s -> (DiscoveryNode.isDedicatedSearchNode(s)) ? "80%" : ZERO, Node::validateFileCacheSize, Property.NodeScope ); @@ -2037,8 +2037,7 @@ DiscoveryNode getNode() { * Else it configures the size to 80% of total capacity for a dedicated search node, if not explicitly defined. */ private void initializeFileCache(Settings settings, CircuitBreaker circuitBreaker) throws IOException { - boolean isWritableRemoteIndexEnabled = FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX_SETTING); - if (DiscoveryNode.isSearchNode(settings) == false && isWritableRemoteIndexEnabled == false) { + if (DiscoveryNode.isSearchNode(settings) == false) { return; } From 7cbff4f9fd8ae53b1672aa8e2582e23bb2c16def Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 12:57:25 -0500 Subject: [PATCH 54/68] Bump actions/setup-java from 1 to 4 (#15104) * Bump actions/setup-java from 1 to 4 Bumps [actions/setup-java](https://github.com/actions/setup-java) from 1 to 4. 
- [Release notes](https://github.com/actions/setup-java/releases) - [Commits](https://github.com/actions/setup-java/compare/v1...v4) --- updated-dependencies: - dependency-name: actions/setup-java dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/benchmark-pull-request.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index 98dd39b1dad54..2a54c2072de59 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -123,7 +123,7 @@ jobs: ref: ${{ env.prHeadRefSha }} token: ${{ secrets.GITHUB_TOKEN }} - name: Setup Java - uses: actions/setup-java@v1 + uses: actions/setup-java@v4 with: java-version: 21 - name: Build and Assemble OpenSearch from PR
diff --git a/CHANGELOG.md b/CHANGELOG.md index f4db5c3ecb5cc..2190f45fc9b09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) +- Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979))
From caa0a2e05b85003116f5a3788f767370217e490e Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Mon, 5 Aug 2024 14:09:58 -0500 Subject: [PATCH 55/68] Update old untriaged workflow to better issue url (#15086)
GitHub doesn't support dynamic days since created/modified, so I've created a simple redirect on my website that will support this use case. See https://peternied.github.io/redirect/issue_search.html for full context on what is available. Source is available on https://github.com/peternied/peternied.github.io
Signed-off-by: Peter Nied --- TRIAGING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/TRIAGING.md b/TRIAGING.md index 6791d5944ee6f..53ef77de49159 100644 --- a/TRIAGING.md +++ b/TRIAGING.md @@ -35,7 +35,7 @@ Meeting structure may vary slightly, but the general structure is as follows: - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) 5.
**Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. 6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. -7. **Review of Old Untriaged Issues:** Time permitting, each meeting will look at all [untriaged issues older than 14 days](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+created%3A%3C2024-05-20) to prevent issues from falling through the cracks (note the GitHub API does not allow for relative times, so the date in this search must be updated every meeting). +7. **Review of Old Untriaged Issues:** Look at all [untriaged issues older than 14 days](https://peternied.github.io/redirect/issue_search.html?owner=opensearch-project&repo=OpenSearch&tag=untriaged&created-since-days=14) to prevent issues from falling through the cracks. ### What is the role of the facilitator? From 49b7cd47b0f0112ca21d1ea3952106f28f8253bb Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 5 Aug 2024 18:06:35 -0400 Subject: [PATCH 56/68] Bump org.apache.avro:avro from 1.11.3 to 1.12.0 in /plugins/repository-hdfs (#15119) * Bump org.apache.avro:avro from 1.11.3 to 1.12.0 in /plugins/repository-hdfs Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 15 +-------------- .../repository-hdfs/licenses/avro-1.11.3.jar.sha1 | 1 - .../repository-hdfs/licenses/avro-1.12.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 15 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2190f45fc9b09..5c7d7aa9bf780 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) +- Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 884fb1333404a..f117bae658abe 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,7 +66,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.3' + api 'org.apache.avro:avro:1.12.0' api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" @@ -425,19 +425,6 @@ thirdPartyAudit { 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 
   'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor',
-
-    'org.apache.avro.reflect.FieldAccessUnsafe',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeByteField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCachedField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCharField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeCustomEncodedField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeDoubleField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeFloatField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeIntField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeLongField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeObjectField',
-    'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField',
   )
 }
diff --git a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1
deleted file mode 100644
index fb43ecbcf22c9..0000000000000
--- a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02b463409b373bff9ece09f54a43d42da5cea55a
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1
new file mode 100644
index 0000000000000..83f7bb3677159
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/avro-1.12.0.jar.sha1
@@ -0,0 +1 @@
+6e692a464b213f6df49f8e3e7fcf42df0dbb7639
\ No newline at end of file

From 7769ce5a9310e56169b7310cca71f1994ee4f8cc Mon Sep 17 00:00:00 2001
From: Craig Perkins
Date: Tue, 6 Aug 2024 11:30:41 -0400
Subject: [PATCH 57/68] Bump org.bouncycastle:bcpg-fips from 1.0.7.1 to 2.0.8
 and org.bouncycastle:bc-fips from 1.0.2.5 to 2.0.0 (#15122)

* Bump org.bouncycastle:bcpg-fips from 1.0.7.1 to 2.0.8 and org.bouncycastle:bc-fips from 1.0.2.5 to 2.0.0 in /distribution/tools/plugin-cli

Signed-off-by: Craig Perkins

* Add to CHANGELOG

Signed-off-by: Craig Perkins

---------

Signed-off-by: Craig Perkins
---
 CHANGELOG.md                                 |  1 +
 distribution/tools/plugin-cli/build.gradle   | 31 ++-----------------
 .../licenses/bc-fips-1.0.2.5.jar.sha1        |  1 -
 .../licenses/bc-fips-2.0.0.jar.sha1          |  1 +
 .../licenses/bcpg-fips-1.0.7.1.jar.sha1      |  1 -
 .../licenses/bcpg-fips-2.0.8.jar.sha1        |  1 +
 6 files changed, 5 insertions(+), 31 deletions(-)
 delete mode 100644 distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1
 create mode 100644 distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1
 delete mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1
 create mode 100644 distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c7d7aa9bf780..061e3280852e8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110))
 - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104))
 - Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119))
+- Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.8 and `org.bouncycastle:bc-fips` from 1.0.2.5 to 2.0.0 in /distribution/tools/plugin-cli ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103))
 
 ### Changed
 - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979))
 
diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle
index 3083ad4375460..a619ba1acf6a7 100644
--- a/distribution/tools/plugin-cli/build.gradle
+++ b/distribution/tools/plugin-cli/build.gradle
@@ -37,8 +37,8 @@ base {
 dependencies {
   compileOnly project(":server")
   compileOnly project(":libs:opensearch-cli")
-  api "org.bouncycastle:bcpg-fips:1.0.7.1"
-  api "org.bouncycastle:bc-fips:1.0.2.5"
+  api "org.bouncycastle:bcpg-fips:2.0.8"
+  api "org.bouncycastle:bc-fips:2.0.0"
   testImplementation project(":test:framework")
   testImplementation 'com.google.jimfs:jimfs:1.3.0'
   testRuntimeOnly("com.google.guava:guava:${versions.guava}") {
@@ -58,33 +58,6 @@ test {
   jvmArgs += [ "-Djava.security.egd=file:/dev/urandom" ]
 }
 
-/*
- * these two classes intentionally use the following JDK internal APIs in order to offer the necessary
- * functionality
- *
- * sun.security.internal.spec.TlsKeyMaterialParameterSpec
- * sun.security.internal.spec.TlsKeyMaterialSpec
- * sun.security.internal.spec.TlsMasterSecretParameterSpec
- * sun.security.internal.spec.TlsPrfParameterSpec
- * sun.security.internal.spec.TlsRsaPremasterSecretParameterSpec
- * sun.security.provider.SecureRandom
- *
- */
-thirdPartyAudit.ignoreViolations(
-  'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider$CoreSecureRandom',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$BaseTLSKeyGeneratorSpi',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSKeyMaterialGenerator$2',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSMasterSecretGenerator$2',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSPRFKeyGenerator',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSRsaPreMasterSecretGenerator$2',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator',
-  'org.bouncycastle.jcajce.provider.ProvSunTLSKDF$TLSExtendedMasterSecretGenerator$2'
-)
-
 thirdPartyAudit.ignoreMissingClasses(
   'org.brotli.dec.BrotliInputStream',
   'org.objectweb.asm.AnnotationVisitor',
diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1
deleted file mode 100644
index 1b44c77dd4ee1..0000000000000
--- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-704e65f7e4fe679e5ab2aa8a840f27f8ced4c522
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1
new file mode 100644
index 0000000000000..79f0e3e9930bb
--- /dev/null
+++ b/distribution/tools/plugin-cli/licenses/bc-fips-2.0.0.jar.sha1
@@ -0,0 +1 @@
+ee9ac432cf08f9a9ebee35d7cf8a45f94959a7ab
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1
deleted file mode 100644
index 44cebc7c92d87..0000000000000
--- a/distribution/tools/plugin-cli/licenses/bcpg-fips-1.0.7.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e1952428655ea822066f86df2e3ecda8fa0ba2b
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1
new file mode 100644
index 0000000000000..758ee2fdf9de6
--- /dev/null
+++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1
@@ -0,0 +1 @@
+51c2f633e0c32d10de1ebab4c86f93310ff820f8
\ No newline at end of file

From 76b9931299b4a45308b7ede4659e750eacd5006a Mon Sep 17 00:00:00 2001
From: David Zane <38449481+dzane17@users.noreply.github.com>
Date: Tue, 6 Aug 2024 09:33:34 -0700
Subject: [PATCH 58/68] Add took time to request nodes stats (#15054)

Signed-off-by: David Zane
---
 CHANGELOG-3.0.md                             |  1 +
 .../action/search/SearchRequestStats.java    | 31 ++++++++++++++++
 .../index/search/stats/SearchStats.java      | 24 ++++++++++++-
 .../search/SearchRequestStatsTests.java      | 35 +++++++++++++++++++
 4 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index 48d978bede420..78e93eed0158a 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800))
 - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625))
 - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957))
+- Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054))
 
 ### Dependencies
 
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java
index 97ef94055faf7..d1d5f568fc09d 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java
@@ -27,6 +27,7 @@
 @PublicApi(since = "2.11.0")
 public final class SearchRequestStats extends SearchRequestOperationsListener {
     Map<SearchPhaseName, StatsHolder> phaseStatsMap = new EnumMap<>(SearchPhaseName.class);
+    StatsHolder tookStatsHolder;
 
     public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled";
     public static final Setting<Boolean> SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting(
@@ -40,6 +41,7 @@ public final class SearchRequestStats extends SearchRequestOperationsListener {
     public SearchRequestStats(ClusterSettings clusterSettings) {
         this.setEnabled(clusterSettings.get(SEARCH_REQUEST_STATS_ENABLED));
         clusterSettings.addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setEnabled);
+        tookStatsHolder = new StatsHolder();
         for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
             phaseStatsMap.put(searchPhaseName, new StatsHolder());
         }
@@ -57,6 +59,18 @@ public long getPhaseMetric(SearchPhaseName searchPhaseName) {
         return phaseStatsMap.get(searchPhaseName).timing.sum();
     }
 
+    public long getTookCurrent() {
+        return tookStatsHolder.current.count();
+    }
+
+    public long getTookTotal() {
+        return tookStatsHolder.total.count();
+    }
+
+    public long getTookMetric() {
+        return tookStatsHolder.timing.sum();
+    }
+
     @Override
     protected void onPhaseStart(SearchPhaseContext context) {
         phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc();
@@ -75,6 +89,23 @@ protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {
         phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec();
     }
 
+    @Override
+    protected void onRequestStart(SearchRequestContext searchRequestContext) {
+        tookStatsHolder.current.inc();
+    }
+
+    @Override
+    protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
+        tookStatsHolder.current.dec();
+        tookStatsHolder.total.inc();
+        tookStatsHolder.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos()));
+    }
+
+    @Override
+    protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
+        tookStatsHolder.current.dec();
+    }
+
     /**
      * Holder of statistics values
      *
diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java
index bb61e1afa05f4..d6ea803c9ee13 100644
--- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java
+++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java
@@ -110,7 +110,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     /**
-     * Holds requests stats for different phases.
+     * Holds all requests stats.
      *
      * @opensearch.api
      */
@@ -124,6 +124,7 @@ public Map<String, PhaseStatsLongHolder> getRequestStatsHolder() {
         }
 
         RequestStatsLongHolder() {
+            requestStatsHolder.put(Fields.TOOK, new PhaseStatsLongHolder());
             for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
                 requestStatsHolder.put(searchPhaseName.getName(), new PhaseStatsLongHolder());
             }
@@ -512,6 +513,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 
         if (requestStatsLongHolder != null) {
             builder.startObject(Fields.REQUEST);
+
+            PhaseStatsLongHolder tookStatsLongHolder = requestStatsLongHolder.requestStatsHolder.get(Fields.TOOK);
+            if (tookStatsLongHolder != null) {
+                builder.startObject(Fields.TOOK);
+                builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(tookStatsLongHolder.timeInMillis));
+                builder.field(Fields.CURRENT, tookStatsLongHolder.current);
+                builder.field(Fields.TOTAL, tookStatsLongHolder.total);
+                builder.endObject();
+            }
+
             for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
                 PhaseStatsLongHolder statsLongHolder = requestStatsLongHolder.requestStatsHolder.get(searchPhaseName.getName());
                 if (statsLongHolder == null) {
@@ -545,6 +555,17 @@ public void setSearchRequestStats(SearchRequestStats searchRequestStats) {
             totalStats.requestStatsLongHolder = new RequestStatsLongHolder();
         }
 
+        // Set took stats
+        totalStats.requestStatsLongHolder.requestStatsHolder.put(
+            Fields.TOOK,
+            new PhaseStatsLongHolder(
+                searchRequestStats.getTookCurrent(),
+                searchRequestStats.getTookTotal(),
+                searchRequestStats.getTookMetric()
+            )
+        );
+
+        // Set phase stats
         for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
             totalStats.requestStatsLongHolder.requestStatsHolder.put(
                 searchPhaseName.getName(),
@@ -678,6 +699,7 @@ static final class Fields {
         static final String CURRENT = "current";
         static final String TOTAL = "total";
         static final String SEARCH_IDLE_REACTIVATE_COUNT_TOTAL = "search_idle_reactivate_count_total";
+        static final String TOOK = "took";
     }
 
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java
index 1af3eb2738a58..3bad3ec3e7d21 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java
@@ -25,6 +25,41 @@
 import static org.mockito.Mockito.when;
 
 public class SearchRequestStatsTests extends OpenSearchTestCase {
+    public void testSearchRequestStats_OnRequestFailure() {
+        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings);
+        SearchPhaseContext mockSearchPhaseContext = mock(SearchPhaseContext.class);
+        SearchRequestContext mockSearchRequestContext = mock(SearchRequestContext.class);
+
+        testRequestStats.onRequestStart(mockSearchRequestContext);
+        assertEquals(1, testRequestStats.getTookCurrent());
+        testRequestStats.onRequestFailure(mockSearchPhaseContext, mockSearchRequestContext);
+        assertEquals(0, testRequestStats.getTookCurrent());
+        assertEquals(0, testRequestStats.getTookTotal());
+    }
+
+    public void testSearchRequestStats_OnRequestEnd() {
+        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings);
+        SearchPhaseContext mockSearchPhaseContext = mock(SearchPhaseContext.class);
+        SearchRequestContext mockSearchRequestContext = mock(SearchRequestContext.class);
+
+        // Start request
+        testRequestStats.onRequestStart(mockSearchRequestContext);
+        assertEquals(1, testRequestStats.getTookCurrent());
+
+        // Mock start time
+        long tookTimeInMillis = randomIntBetween(1, 10);
+        long startTimeInNanos = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis);
+        when(mockSearchRequestContext.getAbsoluteStartNanos()).thenReturn(startTimeInNanos);
+
+        // End request
+        testRequestStats.onRequestEnd(mockSearchPhaseContext, mockSearchRequestContext);
+        assertEquals(0, testRequestStats.getTookCurrent());
+        assertEquals(1, testRequestStats.getTookTotal());
+        assertThat(testRequestStats.getTookMetric(), greaterThanOrEqualTo(tookTimeInMillis));
+    }
+
     public void testSearchRequestPhaseFailure() {
         ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings);
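The took-time accounting in PATCH 58 is a monotonic-clock delta: onRequestStart only bumps an in-flight counter, and onRequestEnd folds System.nanoTime() minus the request's absolute start into a millisecond total. Below is a minimal, self-contained sketch of that pattern; the class and field names are illustrative stand-ins, not the OpenSearch API.

```java
// Sketch of the took-time accounting pattern: in-flight count, completed count,
// and cumulative millis derived from a nanoTime delta. Illustrative names only.
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;

public class TookStatsSketch {
    private final LongAdder current = new LongAdder();      // requests in flight
    private final LongAdder total = new LongAdder();        // requests completed
    private final LongAdder timingMillis = new LongAdder(); // cumulative took time

    long onRequestStart() {
        current.increment();
        return System.nanoTime(); // absolute start on the monotonic clock
    }

    void onRequestEnd(long absoluteStartNanos) {
        current.decrement();
        total.increment();
        timingMillis.add(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - absoluteStartNanos));
    }

    void onRequestFailure() {
        current.decrement(); // failures leave total/timing untouched, as in the patch
    }

    public static void main(String[] args) throws InterruptedException {
        TookStatsSketch stats = new TookStatsSketch();
        long start = stats.onRequestStart();
        Thread.sleep(5);
        stats.onRequestEnd(start);
        System.out.println("took: total=" + stats.total.sum() + " timeMillis=" + stats.timingMillis.sum());
    }
}
```

Using the monotonic clock rather than wall-clock time is what makes the test above safe to assert with greaterThanOrEqualTo.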
From c7254315b7d801951547b5f4bd3f2c89ac484c71 Mon Sep 17 00:00:00 2001
From: gaobinlong
Date: Wed, 7 Aug 2024 01:17:01 +0800
Subject: [PATCH 59/68] Fix version check for adding rangeQuery and regexpQuery
 support for constant_keyword field type (#15127)

Signed-off-by: Gao Binlong
---
 .../{110_constant_keyword.yml => 115_constant_keyword.yml} | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
 rename rest-api-spec/src/main/resources/rest-api-spec/test/index/{110_constant_keyword.yml => 115_constant_keyword.yml} (98%)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml
similarity index 98%
rename from rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml
rename to rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml
index 1c50187534026..e60981dbbf50c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/110_constant_keyword.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/115_constant_keyword.yml
@@ -70,8 +70,8 @@
 ---
 "Queries":
   - skip:
-      version: " - 2.99.99"
-      reason: "rangeQuery and regexpQuery are supported in 3.0.0 in main branch"
+      version: " - 2.16.99"
+      reason: "rangeQuery and regexpQuery are introduced in 2.17.0"
 
   - do:
       indices.create:

From 2829a89f1484cc92e74e56a2695f691e375948c6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Aug 2024 15:16:59 -0400
Subject: [PATCH 60/68] Bump com.azure:azure-core from 1.49.1 to 1.51.0 in
 /plugins/repository-azure (#15111)

* Bump com.azure:azure-core in /plugins/repository-azure

Bumps [com.azure:azure-core](https://github.com/Azure/azure-sdk-for-java) from 1.49.1 to 1.51.0.
- [Release notes](https://github.com/Azure/azure-sdk-for-java/releases)
- [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core_1.49.1...azure-core_1.51.0)

---
updated-dependencies:
- dependency-name: com.azure:azure-core
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md                                                 | 1 +
 plugins/repository-azure/build.gradle                        | 2 +-
 plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 | 1 -
 plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 061e3280852e8..5d1650c8341a7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104))
 - Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119))
 - Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.8 and `org.bouncycastle:bc-fips` from 1.0.2.5 to 2.0.0 in /distribution/tools/plugin-cli ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103))
+- Bump `com.azure:azure-core` from 1.49.1 to 1.51.0 ([#15111](https://github.com/opensearch-project/OpenSearch/pull/15111))
 
 ### Changed
 - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979))
 
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 15e3158f2dbc4..80809e067f65a 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -44,7 +44,7 @@ opensearchplugin {
 }
 
 dependencies {
-  api 'com.azure:azure-core:1.49.1'
+  api 'com.azure:azure-core:1.51.0'
   api 'com.azure:azure-json:1.1.0'
   api 'com.azure:azure-xml:1.0.0'
   api 'com.azure:azure-storage-common:12.25.1'
diff --git a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1
deleted file mode 100644
index d487c08c26e94..0000000000000
--- a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7c44282eaa0f5a3be4b920d6a057509adfe8674
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1
new file mode 100644
index 0000000000000..7200f59af2f9a
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1
@@ -0,0 +1 @@
+ff5d0aedf75ca45ec0ace24673f790d2f7a57096
\ No newline at end of file

From f980924136e4d689581c2346d3def8580c178087 Mon Sep 17 00:00:00 2001
From: Rishabh Singh
Date: Tue, 6 Aug 2024 13:29:15 -0700
Subject: [PATCH 61/68] Add baseline-cluster-config key to benchmark config
 (#15134)

Signed-off-by: Rishabh Singh
---
 .github/benchmark-configs.json               | 34 +++++++++++++-------
 .github/workflows/benchmark-pull-request.yml |  2 ++
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json
index 5b44198cd3b8e..8f4bad040fe44 100644
--- a/.github/benchmark-configs.json
+++ b/.github/benchmark-configs.json
@@ -14,7 +14,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
   },
   "id_2": {
     "description": "Indexing only configuration for HTTP_LOGS workload",
@@ -30,7 +31,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
   },
   "id_3": {
     "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-3.0.0",
@@ -46,7 +48,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   },
   "id_4": {
     "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0",
@@ -62,10 +65,11 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   },
   "id_5": {
-    "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-3.0.0",
+    "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-3.0.0",
     "supported_major_versions": ["3"],
     "cluster-benchmark-configs": {
       "SINGLE_NODE_CLUSTER": "true",
@@ -78,7 +82,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   },
   "id_6": {
     "description": "Search only test-procedure for NYC_TAXIS, uses snapshot to restore the data for OS-2.x",
@@ -94,7 +99,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   },
   "id_7": {
     "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x",
@@ -110,10 +116,11 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   },
   "id_8": {
-    "description": "Search only test-procedure for HTTP_LOGS, uses snapshot to restore the data for OS-2.x",
+    "description": "Search only test-procedure for big5, uses snapshot to restore the data for OS-2.x",
     "supported_major_versions": ["2"],
     "cluster-benchmark-configs": {
       "SINGLE_NODE_CLUSTER": "true",
@@ -126,7 +133,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   },
   "id_9": {
     "description": "Indexing and search configuration for pmc workload",
@@ -141,7 +149,8 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
   },
   "id_10": {
     "description": "Indexing only configuration for stack-overflow workload",
@@ -156,6 +165,7 @@
     "cluster_configuration": {
       "size": "Single-Node",
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
-    }
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline"
   }
 }
diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml
index 2a54c2072de59..1096014e4a291 100644
--- a/.github/workflows/benchmark-pull-request.yml
+++ b/.github/workflows/benchmark-pull-request.yml
@@ -60,6 +60,9 @@ jobs:
             for (const [key, value] of Object.entries(clusterBenchmarkConfigs)) {
               core.exportVariable(key, value);
             }
+            if (benchmarkConfigs[configId].hasOwnProperty('baseline_cluster_config')) {
+              core.exportVariable('BASELINE_CLUSTER_CONFIG', benchmarkConfigs[configId]['baseline_cluster_config']);
+            }
       - name: Post invalid format comment
         if: steps.check_comment.outputs.invalid == 'true'
         uses: actions/github-script@v7
From b47b401b5a3c15304fc07b2bad9621c9dea122da Mon Sep 17 00:00:00 2001
From: Liyun Xiu
Date: Wed, 7 Aug 2024 05:31:12 +0800
Subject: [PATCH 62/68] Fix bulk ingest NPE with empty pipeline (#15033)

Signed-off-by: Liyun Xiu
---
 CHANGELOG.md                                  |  1 +
 .../org/opensearch/ingest/IngestService.java  | 10 +++++-
 .../opensearch/ingest/IngestServiceTests.java | 36 +++++++++++++++++++
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5d1650c8341a7..be5e5598b09c2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 
 ### Fixed
 - Fix constraint bug which allows more primary shards than average primary shards per index ([#14908](https://github.com/opensearch-project/OpenSearch/pull/14908))
+- Fix NPE when bulk ingest with empty pipeline ([#15033](https://github.com/opensearch-project/OpenSearch/pull/15033))
 - Fix missing value of FieldSort for unsigned_long ([#14963](https://github.com/opensearch-project/OpenSearch/pull/14963))
 - Fix delete index template failed when the index template matches a data stream but is unused ([#15080](https://github.com/opensearch-project/OpenSearch/pull/15080))
 
diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java
index 17eb23422e68b..938ca7493926e 100644
--- a/server/src/main/java/org/opensearch/ingest/IngestService.java
+++ b/server/src/main/java/org/opensearch/ingest/IngestService.java
@@ -997,7 +997,7 @@ private void innerBatchExecute(
         Consumer<List<IngestDocumentWrapper>> handler
     ) {
         if (pipeline.getProcessors().isEmpty()) {
-            handler.accept(null);
+            handler.accept(toIngestDocumentWrappers(slots, indexRequests));
             return;
         }
 
@@ -1271,6 +1271,14 @@ private static IngestDocumentWrapper toIngestDocumentWrapper(int slot, IndexRequ
         return new IngestDocumentWrapper(slot, toIngestDocument(indexRequest), null);
     }
 
+    private static List<IngestDocumentWrapper> toIngestDocumentWrappers(List<Integer> slots, List<IndexRequest> indexRequests) {
+        List<IngestDocumentWrapper> ingestDocumentWrappers = new ArrayList<>();
+        for (int i = 0; i < slots.size(); ++i) {
+            ingestDocumentWrappers.add(toIngestDocumentWrapper(slots.get(i), indexRequests.get(i)));
+        }
+        return ingestDocumentWrappers;
+    }
+
     private static Map<Integer, IndexRequest> createSlotIndexRequestMap(List<Integer> slots, List<IndexRequest> indexRequests) {
         Map<Integer, IndexRequest> slotIndexRequestMap = new HashMap<>();
         for (int i = 0; i < slots.size(); ++i) {
diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
index 166b94966196c..1f4b1d635d438 100644
--- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
+++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
@@ -1995,6 +1995,42 @@ public void testExecuteBulkRequestInBatchWithDefaultBatchSize() {
         verify(mockCompoundProcessor, never()).execute(any(), any());
     }
 
+    public void testExecuteEmptyPipelineInBatch() throws Exception {
+        IngestService ingestService = createWithProcessors(emptyMap());
+        PutPipelineRequest putRequest = new PutPipelineRequest(
+            "_id",
+            new BytesArray("{\"processors\": [], \"description\": \"_description\"}"),
+            MediaTypeRegistry.JSON
+        );
+        ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty
+        ClusterState previousClusterState = clusterState;
+        clusterState = IngestService.innerPut(putRequest, clusterState);
+        ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));
+        BulkRequest bulkRequest = new BulkRequest();
+        IndexRequest indexRequest1 = new IndexRequest("_index").id("_id1").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none");
+        bulkRequest.add(indexRequest1);
+        IndexRequest indexRequest2 = new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none");
+        bulkRequest.add(indexRequest2);
+        IndexRequest indexRequest3 = new IndexRequest("_index").id("_id3").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none");
+        bulkRequest.add(indexRequest3);
+        IndexRequest indexRequest4 = new IndexRequest("_index").id("_id4").source(emptyMap()).setPipeline("_id").setFinalPipeline("_none");
+        bulkRequest.add(indexRequest4);
+        bulkRequest.batchSize(4);
+        final Map<Integer, Exception> failureHandler = new HashMap<>();
+        final Map<Thread, Exception> completionHandler = new HashMap<>();
+        ingestService.executeBulkRequest(
+            4,
+            bulkRequest.requests(),
+            failureHandler::put,
+            completionHandler::put,
+            indexReq -> {},
+            Names.WRITE,
+            bulkRequest
+        );
+        assertTrue(failureHandler.isEmpty());
+        assertEquals(Set.of(Thread.currentThread()), completionHandler.keySet());
+    }
+
     public void testPrepareBatches_same_index_pipeline() {
         IngestService.IndexRequestWrapper wrapper1 = createIndexRequestWrapper("index1", Collections.singletonList("p1"));
         IngestService.IndexRequestWrapper wrapper2 = createIndexRequestWrapper("index1", Collections.singletonList("p1"));
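The fix in PATCH 62 hinges on a contract: the completion handler expects one wrapper per slot even when the pipeline has no processors, and passing null broke that. The sketch below restates the contract with simplified stand-in types, not the real IngestService signatures.

```java
// Sketch of the empty-pipeline contract: with zero processors the handler still
// gets one wrapper per slot so responses can be matched to requests by slot.
// DocWrapper and execute() are simplified stand-ins for the OpenSearch classes.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class EmptyPipelineSketch {
    record DocWrapper(int slot, String doc) {}

    static void execute(List<Integer> slots, List<String> docs, List<Runnable> processors,
                        Consumer<List<DocWrapper>> handler) {
        if (processors.isEmpty()) {
            // Before the fix this path handed the handler null, which caused the NPE.
            List<DocWrapper> wrappers = new ArrayList<>();
            for (int i = 0; i < slots.size(); i++) {
                wrappers.add(new DocWrapper(slots.get(i), docs.get(i)));
            }
            handler.accept(wrappers);
            return;
        }
        // ... otherwise run the processors and hand back the processed wrappers ...
    }

    public static void main(String[] args) {
        execute(List.of(0, 1), List.of("a", "b"), List.of(),
                wrappers -> wrappers.forEach(w -> System.out.println(w.slot() + " -> " + w.doc())));
    }
}
```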
From 212597e41717d1474b143721a29fd6b6e9f8f2fd Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Tue, 6 Aug 2024 15:26:12 -0700
Subject: [PATCH 63/68] CODEOWNERS personalizations for jed326 (#15137)

Signed-off-by: Jay Deng
---
 .github/CODEOWNERS | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 1aefeee710f47..fb7d73f599670 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -13,15 +13,25 @@
 # Default ownership for all repo files
 * @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 
+/modules/lang-painless/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/modules/parent-join/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
 /modules/transport-netty4/ @peternied
 
 /plugins/identity-shiro/ @peternied
 
+/server/src/internalClusterTest/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/internalClusterTest/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+
 /server/src/main/java/org/opensearch/extensions/ @peternied
 /server/src/main/java/org/opensearch/identity/ @peternied
-/server/src/main/java/org/opensearch/threadpool/ @peternied
+/server/src/main/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/main/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/main/java/org/opensearch/threadpool/ @jed326 @peternied
 /server/src/main/java/org/opensearch/transport/ @peternied
 
-/.github/ @peternied
+/server/src/test/java/org/opensearch/index/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+/server/src/test/java/org/opensearch/search/ @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
+
+/.github/ @jed326 @peternied
 
 /MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gaobinlong @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
From a918530397e3e70d5171d879b2238b3005e5c66f Mon Sep 17 00:00:00 2001
From: Bharathwaj G
Date: Wed, 7 Aug 2024 12:27:21 +0530
Subject: [PATCH 64/68] Add changes to build star tree in off heap (#14817)

---------

Signed-off-by: Bharathwaj G
---
 .../index/mapper/StarTreeMapperIT.java        |   4 +-
 .../common/util/ByteArrayBackedBitset.java    |  86 +++
 .../composite/Composite99DocValuesWriter.java |   8 +-
 .../aggregators/CountValueAggregator.java     |   5 +
 .../aggregators/SumValueAggregator.java       |   5 +
 .../startree/aggregators/ValueAggregator.java |   5 +
 .../builder/AbstractDocumentsFileManager.java | 231 ++++++++
 .../startree/builder/BaseStarTreeBuilder.java |  48 +-
 .../builder/OffHeapStarTreeBuilder.java       | 334 ++++++++++++
 .../builder/OnHeapStarTreeBuilder.java        |  24 +-
 .../builder/SegmentDocsFileManager.java       | 103 ++++
 .../builder/StarTreeDocsFileManager.java      | 294 ++++++++++
 .../startree/builder/StarTreesBuilder.java    |  13 +-
 .../utils/StarTreeDocumentBitSetUtil.java     |  57 ++
 .../utils/StarTreeDocumentsSorter.java        |  66 +++
 .../datacube/startree/utils/TreeNode.java     |   4 +
 .../index/mapper/StarTreeMapper.java          |   3 +-
 .../builder/AbstractStarTreeBuilderTests.java | 513 ++++++++++++++++--
 .../builder/OffHeapStarTreeBuilderTests.java  |  26 +
 .../builder/StarTreesBuilderTests.java        |  10 +-
 .../SequentialDocValuesIteratorTests.java     |   2 -
 .../StarTreeDocumentBitSetUtilTests.java      |  72 +++
 .../utils/StarTreeDocumentsSorterTests.java   | 201 +++++++
 .../index/mapper/StarTreeMapperTests.java     |   4 +-
 24 files changed, 2028 insertions(+), 90 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractDocumentsFileManager.java
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/SegmentDocsFileManager.java
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtil.java
 create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java
 create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilderTests.java
 create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtilTests.java
 create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java

diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
index 8e5193b650868..1cabb8b617ce3 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
@@ -275,7 +275,7 @@ public void testValidCompositeIndex() {
             assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
             assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
             assertEquals(
-                StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP,
+                StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP,
                 starTreeFieldType.getStarTreeConfig().getBuildMode()
             );
             assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
@@ -359,7 +359,7 @@ public void testUpdateIndexWhenMappingIsSame() {
             assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
             assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
             assertEquals(
-                StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP,
+                StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP,
                 starTreeFieldType.getStarTreeConfig().getBuildMode()
             );
             assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
diff --git a/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java
new file mode 100644
index 0000000000000..2d7948d414937
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/util/ByteArrayBackedBitset.java
@@ -0,0 +1,86 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.util;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RandomAccessInput;
+
+import java.io.IOException;
+
+/**
+ * A bitset backed by a byte array. This will initialize and set bits in the byte array based on the index.
+ */
+public class ByteArrayBackedBitset {
+    private final byte[] byteArray;
+
+    /**
+     * Constructor which uses an on heap list. This should be used during construction of the bitset.
+     */
+    public ByteArrayBackedBitset(int capacity) {
+        byteArray = new byte[capacity];
+    }
+
+    /**
+     * Constructor which sets the Lucene's RandomAccessInput to read the bitset into a read-only buffer.
+     */
+    public ByteArrayBackedBitset(RandomAccessInput in, long offset, int length) throws IOException {
+        byteArray = new byte[length];
+        int i = 0;
+        while (i < length) {
+            byteArray[i] = in.readByte(offset + i);
+            i++;
+        }
+    }
+
+    /**
+     * Constructor which sets the Lucene's IndexInput to read the bitset into a read-only buffer.
+     */
+    public ByteArrayBackedBitset(IndexInput in, int length) throws IOException {
+        byteArray = new byte[length];
+        int i = 0;
+        while (i < length) {
+            byteArray[i] = in.readByte();
+            i++;
+        }
+    }
+
+    /**
+     * Sets the bit at the given index to 1.
+     * Each byte can indicate 8 bits, so the index is divided by 8 to get the byte array index.
+     * @param index the index to set the bit
+     */
+    public void set(int index) {
+        int byteArrIndex = index >> 3;
+        byteArray[byteArrIndex] |= (byte) (1 << (index & 7));
+    }
+
+    public int write(IndexOutput output) throws IOException {
+        int numBytes = 0;
+        for (Byte bitSet : byteArray) {
+            output.writeByte(bitSet);
+            numBytes += Byte.BYTES;
+        }
+        return numBytes;
+    }
+
+    /**
+     * Retrieves whether the bit is set or not at the given index.
+     * @param index the index to look up for the bit
+     * @return true if bit is set, false otherwise
+     */
+    public boolean get(int index) throws IOException {
+        int byteArrIndex = index >> 3;
+        return (byteArray[byteArrIndex] & (1 << (index & 7))) != 0;
+    }
+
+    public int getCurrBytesRead() {
+        return byteArray.length;
+    }
+}
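ByteArrayBackedBitset addresses bit i as byte (i >> 3), bit position (i & 7) within that byte. A tiny standalone illustration of that arithmetic follows; it copies only the core math of set/get, not the class above.

```java
// Demonstrates the byte/bit addressing used by ByteArrayBackedBitset:
// bit i lives in byte (i >> 3) at position (i & 7).
public class BitsetMathSketch {
    public static void main(String[] args) {
        byte[] bits = new byte[2]; // capacity for 16 bits
        int[] toSet = { 0, 3, 9, 15 };
        for (int i : toSet) {
            bits[i >> 3] |= (byte) (1 << (i & 7)); // same math as set(index)
        }
        for (int i = 0; i < 16; i++) {
            boolean isSet = (bits[i >> 3] & (1 << (i & 7))) != 0; // same as get(index)
            if (isSet) {
                System.out.println("bit " + i + " is set in byte " + (i >> 3));
            }
        }
    }
}
```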
diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java
index 3859d3c998573..6ed1a8c42e380 100644
--- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java
+++ b/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java
@@ -8,8 +8,6 @@
 
 package org.opensearch.index.codec.composite;
 
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.DocValuesProducer;
 import org.apache.lucene.index.DocValues;
@@ -50,9 +48,9 @@ public class Composite99DocValuesWriter extends DocValuesConsumer {
     private final Set<CompositeMappedFieldType> compositeMappedFieldTypes;
     private final Set<String> compositeFieldSet;
     private final Set<String> segmentFieldSet;
+    private final boolean segmentHasCompositeFields;
 
     private final Map<String, DocValuesProducer> fieldProducerMap = new HashMap<>();
-    private static final Logger logger = LogManager.getLogger(Composite99DocValuesWriter.class);
 
     public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState segmentWriteState, MapperService mapperService) {
@@ -70,6 +68,8 @@ public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState
         for (CompositeMappedFieldType type : compositeMappedFieldTypes) {
             compositeFieldSet.addAll(type.fields());
         }
+        // check if there are any composite fields which are part of the segment
+        segmentHasCompositeFields = Collections.disjoint(segmentFieldSet, compositeFieldSet) == false;
     }
 
     @Override
@@ -91,7 +91,7 @@ public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) th
     public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
         delegate.addSortedNumericField(field, valuesProducer);
         // Perform this only during flush flow
-        if (mergeState.get() == null) {
+        if (mergeState.get() == null && segmentHasCompositeFields) {
             createCompositeIndicesIfPossible(valuesProducer, field);
         }
     }
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java
index 5390b6728b9b6..ed159ee2efb7b 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java
@@ -68,4 +68,9 @@ public Long toLongValue(Long value) {
     public Long toStarTreeNumericTypeValue(Long value) {
         return value;
     }
+
+    @Override
+    public Long getIdentityMetricValue() {
+        return 0L;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java
index 385549216e4d6..a471f0e2bd960 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/SumValueAggregator.java
@@ -103,4 +103,9 @@ public Double toStarTreeNumericTypeValue(Long value) {
             throw new IllegalStateException("Cannot convert " + value + " to sortable aggregation type", e);
         }
     }
+
+    @Override
+    public Double getIdentityMetricValue() {
+        return 0D;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java
index 93230ed012b13..048582cc530e5 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregator.java
@@ -61,4 +61,9 @@ public interface ValueAggregator {
      * Converts an aggregated value from a Long type.
      */
     A toStarTreeNumericTypeValue(Long rawValue);
+
+    /**
+     * Fetches a value that does not alter the result of aggregations
+     */
+    A getIdentityMetricValue();
 }
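getIdentityMetricValue() supplies the aggregation's identity element: a value that can stand in for a missing metric without changing the aggregated result (0 for both sum and count above). Here is a hedged sketch of the idea with simplified types; the Agg interface is an illustrative stand-in, not the ValueAggregator API.

```java
// Shows why an identity element matters: folding it into an aggregation is a no-op,
// so missing (null) metric values can be substituted safely.
import java.util.Arrays;
import java.util.List;

public class IdentityMetricSketch {
    interface Agg {
        double identity();
        double merge(double acc, double v);
    }

    public static void main(String[] args) {
        Agg sum = new Agg() {
            public double identity() { return 0D; }           // identity for sum
            public double merge(double acc, double v) { return acc + v; }
        };
        List<Double> values = Arrays.asList(4.0, null, 6.0);  // null = missing metric
        double acc = sum.identity();
        for (Double v : values) {
            acc = sum.merge(acc, v == null ? sum.identity() : v); // identity is a no-op
        }
        System.out.println("sum = " + acc); // 10.0, the missing value did not skew it
    }
}
```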
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractDocumentsFileManager.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractDocumentsFileManager.java
new file mode 100644
index 0000000000000..78c49dbada6b2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractDocumentsFileManager.java
@@ -0,0 +1,231 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.compositeindex.datacube.startree.builder;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RandomAccessInput;
+import org.apache.lucene.store.TrackingDirectoryWrapper;
+import org.apache.lucene.util.NumericUtils;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeField;
+import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo;
+import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericTypeConverters;
+import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeDocumentBitSetUtil;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Abstract class for managing star tree file operations.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public abstract class AbstractDocumentsFileManager implements Closeable {
+    private static final Logger logger = LogManager.getLogger(AbstractDocumentsFileManager.class);
+    protected final StarTreeField starTreeField;
+    protected final List<MetricAggregatorInfo> metricAggregatorInfos;
+    protected final int numMetrics;
+    protected final TrackingDirectoryWrapper tmpDirectory;
+    protected final SegmentWriteState state;
+    protected int docSizeInBytes = -1;
+
+    public AbstractDocumentsFileManager(
+        SegmentWriteState state,
+        StarTreeField starTreeField,
+        List<MetricAggregatorInfo> metricAggregatorInfos
+    ) {
+        this.starTreeField = starTreeField;
+        this.tmpDirectory = new TrackingDirectoryWrapper(state.directory);
+        this.metricAggregatorInfos = metricAggregatorInfos;
+        this.state = state;
+        numMetrics = metricAggregatorInfos.size();
+    }
+
+    private void setDocSizeInBytes(int numBytes) {
+        if (docSizeInBytes == -1) {
+            docSizeInBytes = numBytes;
+        }
+        assert docSizeInBytes == numBytes;
+    }
+
+    /**
+     * Write the star tree document to file associated with dimensions and metrics
+     */
+    protected int writeStarTreeDocument(StarTreeDocument starTreeDocument, IndexOutput output, boolean isAggregatedDoc) throws IOException {
+        int numBytes = writeDimensions(starTreeDocument, output);
+        numBytes += writeMetrics(starTreeDocument, output, isAggregatedDoc);
+        setDocSizeInBytes(numBytes);
+        return numBytes;
+    }
+
+    /**
+     * Write dimensions to file
+     */
+    protected int writeDimensions(StarTreeDocument starTreeDocument, IndexOutput output) throws IOException {
+        int numBytes = 0;
+        for (int i = 0; i < starTreeDocument.dimensions.length; i++) {
+            output.writeLong(starTreeDocument.dimensions[i] == null ? 0L : starTreeDocument.dimensions[i]);
+            numBytes += Long.BYTES;
+        }
+        numBytes += StarTreeDocumentBitSetUtil.writeBitSet(starTreeDocument.dimensions, output);
+        return numBytes;
+    }
+
+    /**
+     * Write star tree document metrics to file
+     */
+    protected int writeMetrics(StarTreeDocument starTreeDocument, IndexOutput output, boolean isAggregatedDoc) throws IOException {
+        int numBytes = 0;
+        for (int i = 0; i < starTreeDocument.metrics.length; i++) {
+            switch (metricAggregatorInfos.get(i).getValueAggregators().getAggregatedValueType()) {
+                case LONG:
+                    output.writeLong(starTreeDocument.metrics[i] == null ? 0L : (Long) starTreeDocument.metrics[i]);
+                    numBytes += Long.BYTES;
+                    break;
+                case DOUBLE:
+                    if (isAggregatedDoc) {
+                        long val = NumericUtils.doubleToSortableLong(
+                            starTreeDocument.metrics[i] == null ? 0.0 : (Double) starTreeDocument.metrics[i]
+                        );
+                        output.writeLong(val);
+                    } else {
+                        output.writeLong(starTreeDocument.metrics[i] == null ? 0L : (Long) starTreeDocument.metrics[i]);
+                    }
+                    numBytes += Long.BYTES;
+                    break;
+                default:
+                    throw new IllegalStateException("Unsupported metric type");
+            }
+        }
+        numBytes += StarTreeDocumentBitSetUtil.writeBitSet(starTreeDocument.metrics, output);
+        return numBytes;
+    }
+
+    /**
+     * Reads the star tree document from file with given offset
+     *
+     * @param input           RandomAccessInput
+     * @param offset          Offset in the file
+     * @param isAggregatedDoc boolean to indicate if aggregated star tree docs should be read
+     * @return StarTreeDocument
+     * @throws IOException IOException in case of I/O errors
+     */
+    protected StarTreeDocument readStarTreeDocument(RandomAccessInput input, long offset, boolean isAggregatedDoc) throws IOException {
+        int dimSize = starTreeField.getDimensionsOrder().size();
+        Long[] dimensions = new Long[dimSize];
+        long initialOffset = offset;
+        offset = readDimensions(dimensions, input, offset);
+
+        Object[] metrics = new Object[numMetrics];
+        offset = readMetrics(input, offset, numMetrics, metrics, isAggregatedDoc);
+        assert (offset - initialOffset) == docSizeInBytes;
+        return new StarTreeDocument(dimensions, metrics);
+    }
+
+    /**
+     * Read dimensions from file
+     */
+    protected long readDimensions(Long[] dimensions, RandomAccessInput input, long offset) throws IOException {
+        for (int i = 0; i < dimensions.length; i++) {
+            try {
+                dimensions[i] = input.readLong(offset);
+            } catch (Exception e) {
+                logger.error("Error reading dimension value at offset {} for dimension {}", offset, i);
+                throw e;
+            }
+            offset += Long.BYTES;
+        }
+        offset += StarTreeDocumentBitSetUtil.readBitSet(input, offset, dimensions, index -> null);
+        return offset;
+    }
+
+    /**
+     * Read star tree metrics from file
+     */
+    protected long readMetrics(RandomAccessInput input, long offset, int numMetrics, Object[] metrics, boolean isAggregatedDoc)
+        throws IOException {
+        for (int i = 0; i < numMetrics; i++) {
+            switch (metricAggregatorInfos.get(i).getValueAggregators().getAggregatedValueType()) {
+                case LONG:
+                    metrics[i] = input.readLong(offset);
+                    offset += Long.BYTES;
+                    break;
+                case DOUBLE:
+                    long val = input.readLong(offset);
+                    if (isAggregatedDoc) {
+                        metrics[i] = StarTreeNumericTypeConverters.sortableLongtoDouble(val);
+                    } else {
+                        metrics[i] = val;
+                    }
+                    offset += Long.BYTES;
+                    break;
+                default:
+                    throw new IllegalStateException("Unsupported metric type");
+            }
+        }
+        offset += StarTreeDocumentBitSetUtil.readBitSet(
+            input,
+            offset,
+            metrics,
+            index -> metricAggregatorInfos.get(index).getValueAggregators().getIdentityMetricValue()
+        );
+        return offset;
+    }
+
+    /**
+     * Write star tree document to file
+     */
+    public abstract void writeStarTreeDocument(StarTreeDocument starTreeDocument, boolean isAggregatedDoc) throws IOException;
+
+    /**
+     * Read star tree document from file based on doc id
+     */
+    public abstract StarTreeDocument readStarTreeDocument(int docId, boolean isAggregatedDoc) throws IOException;
+
+    /**
+     * Read star document dimensions from file based on doc id
+     */
+    public abstract Long[] readDimensions(int docId) throws IOException;
+
+    /**
+     * Read dimension value for given doc id and dimension id
+     */
+    public abstract Long getDimensionValue(int docId, int dimensionId) throws IOException;
+
+    /**
+     * Delete the temporary files created
+     */
+    public void deleteFiles(boolean success) throws IOException {
+        if (success) {
+            for (String file : tmpDirectory.getCreatedFiles()) {
+                tmpDirectory.deleteFile(file);
+            }
+        } else {
+            deleteFilesIgnoringException();
+        }
+    }
+
+    /**
+     * Delete the temporary files created
+     */
+    private void deleteFilesIgnoringException() throws IOException {
+        for (String file : tmpDirectory.getCreatedFiles()) {
+            try {
+                tmpDirectory.deleteFile(file);
+            } catch (final IOException ignored) {} // similar to IOUtils.deleteFilesWhileIgnoringExceptions
+        }
+    }
+}
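AbstractDocumentsFileManager writes a fixed-width 0L placeholder for each null dimension or metric and then lets StarTreeDocumentBitSetUtil append a bitset marking which slots were null, so reads can restore them (to null for dimensions, to the identity value for metrics). The sketch below reproduces that scheme against an in-memory ByteBuffer, a hypothetical stand-in for the Lucene IndexOutput/RandomAccessInput pair used above.

```java
// Null-tracking serialization sketch: placeholder longs plus a trailing null bitset.
// ByteBuffer stands in for the real Lucene I/O abstractions.
import java.nio.ByteBuffer;
import java.util.Arrays;

public class NullBitsetSketch {
    public static void main(String[] args) {
        Long[] dims = { 5L, null, 7L };
        ByteBuffer buf = ByteBuffer.allocate(dims.length * Long.BYTES + 1);

        byte nullBits = 0;
        for (int i = 0; i < dims.length; i++) {
            buf.putLong(dims[i] == null ? 0L : dims[i]); // fixed-width placeholder
            if (dims[i] == null) {
                nullBits |= (byte) (1 << i);             // remember which slot was null
            }
        }
        buf.put(nullBits); // one byte covers up to 8 slots in this toy example
        buf.flip();

        Long[] readBack = new Long[dims.length];
        for (int i = 0; i < readBack.length; i++) {
            readBack[i] = buf.getLong();
        }
        byte marker = buf.get();
        for (int i = 0; i < readBack.length; i++) {
            if ((marker & (1 << i)) != 0) {
                readBack[i] = null; // restore nulls from the bitset
            }
        }
        System.out.println(Arrays.toString(readBack)); // [5, null, 7]
    }
}
```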
@@ -330,8 +359,13 @@ protected StarTreeDocument reduceSegmentStarTreeDocuments( * @return converted metric value to long */ private static long getLong(Object metric) { - Long metricValue = null; + // TODO : remove this after we merge identity changes + if (metric instanceof Double) { + if (0D == (double) metric) { + return 0L; + } + } try { if (metric instanceof Long) { metricValue = (long) metric; @@ -709,4 +743,8 @@ public void close() throws IOException { } abstract Iterator mergeStarTrees(List starTreeValues) throws IOException; + + public TreeNode getRootNode() { + return rootNode; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java new file mode 100644 index 0000000000000..f63b0cb0cc77d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java @@ -0,0 +1,334 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeDocumentsSorter; +import org.opensearch.index.mapper.MapperService; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Off-heap implementation of the star tree builder. + * @opensearch.experimental + */ +@ExperimentalApi +public class OffHeapStarTreeBuilder extends BaseStarTreeBuilder { + private static final Logger logger = LogManager.getLogger(OffHeapStarTreeBuilder.class); + private final StarTreeDocsFileManager starTreeDocumentFileManager; + private final SegmentDocsFileManager segmentDocumentFileManager; + + /** + * Builds star tree based on star tree field configuration consisting of dimensions, metrics and star tree index + * specific configuration. 
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java
new file mode 100644
index 0000000000000..f63b0cb0cc77d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java
@@ -0,0 +1,334 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.compositeindex.datacube.startree.builder;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues;
+import org.opensearch.index.compositeindex.datacube.Dimension;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeField;
+import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator;
+import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeDocumentsSorter;
+import org.opensearch.index.mapper.MapperService;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Off-heap implementation of the star tree builder.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public class OffHeapStarTreeBuilder extends BaseStarTreeBuilder {
+    private static final Logger logger = LogManager.getLogger(OffHeapStarTreeBuilder.class);
+    private final StarTreeDocsFileManager starTreeDocumentFileManager;
+    private final SegmentDocsFileManager segmentDocumentFileManager;
+
+    /**
+     * Builds star tree based on star tree field configuration consisting of dimensions, metrics and star tree index
+     * specific configuration.
+     *
+     * @param starTreeField holds the configuration for the star tree
+     * @param state         stores the segment write state
+     * @param mapperService helps to find the original type of the field
+     */
+    protected OffHeapStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState state, MapperService mapperService) throws IOException {
+        super(starTreeField, state, mapperService);
+        segmentDocumentFileManager = new SegmentDocsFileManager(state, starTreeField, metricAggregatorInfos);
+        try {
+            starTreeDocumentFileManager = new StarTreeDocsFileManager(state, starTreeField, metricAggregatorInfos);
+        } catch (IOException e) {
+            IOUtils.closeWhileHandlingException(segmentDocumentFileManager);
+            throw e;
+        }
+    }
+
+    @Override
+    public void appendStarTreeDocument(StarTreeDocument starTreeDocument) throws IOException {
+        starTreeDocumentFileManager.writeStarTreeDocument(starTreeDocument, true);
+    }
+
+    /**
+     * Builds star tree based on the star tree values from multiple segments
+     *
+     * @param starTreeValuesSubs contains the star tree values from multiple segments
+     */
+    @Override
+    public void build(List<StarTreeValues> starTreeValuesSubs) throws IOException {
+        boolean success = false;
+        try {
+            build(mergeStarTrees(starTreeValuesSubs));
+            success = true;
+        } finally {
+            starTreeDocumentFileManager.deleteFiles(success);
+            segmentDocumentFileManager.deleteFiles(success);
+        }
+    }
+
+    /**
+     * Sorts and aggregates all the documents of the segment based on dimension and metrics configuration
+     *
+     * @param dimensionReaders List of docValues readers to read dimensions from the segment
+     * @param metricReaders    List of docValues readers to read metrics from the segment
+     * @return Iterator of star-tree documents
+     */
+    @Override
+    public Iterator<StarTreeDocument> sortAndAggregateSegmentDocuments(
+        SequentialDocValuesIterator[] dimensionReaders,
+        List<SequentialDocValuesIterator> metricReaders
+    ) throws IOException {
+        // Write all dimensions for segment documents into the buffer,
+        // and sort all documents using an int array
+        int[] sortedDocIds = new int[totalSegmentDocs];
+        for (int i = 0; i < totalSegmentDocs; i++) {
+            sortedDocIds[i] = i;
+        }
+        try {
+            for (int i = 0; i < totalSegmentDocs; i++) {
+                StarTreeDocument document = getSegmentStarTreeDocument(i, dimensionReaders, metricReaders);
+                segmentDocumentFileManager.writeStarTreeDocument(document, false);
+            }
+        } catch (IOException ex) {
+            segmentDocumentFileManager.close();
+            throw ex;
+        }
+        // Create an iterator for aggregated documents
+        return sortAndReduceDocuments(sortedDocIds, totalSegmentDocs, false);
+    }
+
+    /**
+     * Sorts and aggregates the star-tree documents from multiple segments and builds star tree based on the newly
+     * aggregated star-tree documents
+     *
+     * @param starTreeValuesSubs StarTreeValues from multiple segments
+     * @return iterator of star tree documents
+     */
+    Iterator<StarTreeDocument> mergeStarTrees(List<StarTreeValues> starTreeValuesSubs) throws IOException {
+        int numDocs = 0;
+        int[] docIds;
+        try {
+            for (StarTreeValues starTreeValues : starTreeValuesSubs) {
+                List<Dimension> dimensionsSplitOrder = starTreeValues.getStarTreeField().getDimensionsOrder();
+                SequentialDocValuesIterator[] dimensionReaders = new SequentialDocValuesIterator[starTreeValues.getStarTreeField()
+                    .getDimensionsOrder()
+                    .size()];
+                for (int i = 0; i < dimensionsSplitOrder.size(); i++) {
+                    String dimension = dimensionsSplitOrder.get(i).getField();
+                    dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionDocValuesIteratorMap().get(dimension));
+                }
+                List<SequentialDocValuesIterator> metricReaders = new ArrayList<>();
+                for (Map.Entry<String, DocIdSetIterator> metricDocValuesEntry : starTreeValues.getMetricDocValuesIteratorMap().entrySet()) {
+                    metricReaders.add(new SequentialDocValuesIterator(metricDocValuesEntry.getValue()));
+                }
+                int currentDocId = 0;
+                int numSegmentDocs = Integer.parseInt(
+                    starTreeValues.getAttributes().getOrDefault(NUM_SEGMENT_DOCS, String.valueOf(DocIdSetIterator.NO_MORE_DOCS))
+                );
+                while (currentDocId < numSegmentDocs) {
+                    StarTreeDocument starTreeDocument = getStarTreeDocument(currentDocId, dimensionReaders, metricReaders);
+                    segmentDocumentFileManager.writeStarTreeDocument(starTreeDocument, true);
+                    numDocs++;
+                    currentDocId++;
+                }
+            }
+            docIds = new int[numDocs];
+            for (int i = 0; i < numDocs; i++) {
+                docIds[i] = i;
+            }
+        } catch (IOException ex) {
+            segmentDocumentFileManager.close();
+            throw ex;
+        }
+
+        if (numDocs == 0) {
+            return Collections.emptyIterator();
+        }
+
+        return sortAndReduceDocuments(docIds, numDocs, true);
+    }
+
+    /**
+     * Sorts and reduces the star tree documents based on the dimensions
+     */
+    private Iterator<StarTreeDocument> sortAndReduceDocuments(int[] sortedDocIds, int numDocs, boolean isMerge) throws IOException {
+        try {
+            if (sortedDocIds == null || sortedDocIds.length == 0) {
+                logger.debug("Sorted doc ids array is null");
+                return Collections.emptyIterator();
+            }
+            try {
+                StarTreeDocumentsSorter.sort(sortedDocIds, -1, numDocs, index -> {
+                    try {
+                        return segmentDocumentFileManager.readDimensions(sortedDocIds[index]);
+                    } catch (IOException e) {
+                        throw new UncheckedIOException(e);
+                    }
+                });
+            } catch (UncheckedIOException ex) {
+                // Unwrap UncheckedIOException and throw as IOException
+                if (ex.getCause() != null) {
+                    throw ex.getCause();
+                }
+                throw ex;
+            }
+            final StarTreeDocument currentDocument = segmentDocumentFileManager.readStarTreeDocument(sortedDocIds[0], isMerge);
+            // Create an iterator for aggregated documents
+            return new Iterator<StarTreeDocument>() {
+                StarTreeDocument tempCurrentDocument = currentDocument;
+                boolean hasNext = true;
+                int docId = 1;
+
+                @Override
+                public boolean hasNext() {
+                    return hasNext;
+                }
+
+                @Override
+                public StarTreeDocument next() {
+                    StarTreeDocument next = reduceSegmentStarTreeDocuments(null, tempCurrentDocument, isMerge);
+                    while (docId < numDocs) {
+                        StarTreeDocument doc;
+                        try {
+                            doc = segmentDocumentFileManager.readStarTreeDocument(sortedDocIds[docId++], isMerge);
+                        } catch (IOException e) {
+                            throw new RuntimeException("Reducing documents failed ", e);
+                        }
+                        if (!Arrays.equals(doc.dimensions, next.dimensions)) {
+                            tempCurrentDocument = doc;
+                            return next;
+                        } else {
+                            next = reduceSegmentStarTreeDocuments(next, doc, isMerge);
+                        }
+                    }
+                    hasNext = false;
+                    try {
+                        segmentDocumentFileManager.close();
+                    } catch (IOException ex) {
+                        logger.error("Closing segment documents file failed", ex);
+                    }
+                    return next;
+                }
+            };
+        } catch (IOException ex) {
+            IOUtils.closeWhileHandlingException(segmentDocumentFileManager);
+            throw ex;
+        }
+    }
+
+    /**
+     * Get star tree document for the given docId from the star-tree.documents file
+     */
+    @Override
+    public StarTreeDocument getStarTreeDocument(int docId) throws IOException {
+        return starTreeDocumentFileManager.readStarTreeDocument(docId, true);
+    }
+
+    // This should be only used for testing
+    @Override
+    public List<StarTreeDocument> getStarTreeDocuments() throws IOException {
+        List<StarTreeDocument> starTreeDocuments = new ArrayList<>();
+        for (int i = 0; i < numStarTreeDocs; i++) {
+            starTreeDocuments.add(getStarTreeDocument(i));
+        }
+        return starTreeDocuments;
+    }
+
+    @Override
+    public Long getDimensionValue(int docId, int dimensionId) throws IOException {
+        return starTreeDocumentFileManager.getDimensionValue(docId, dimensionId);
+    }
+
+    /**
+     * Generates a star-tree for a given star-node
+     *
+     * @param startDocId  Start document id in the star-tree
+     * @param endDocId    End document id (exclusive) in the star-tree
+     * @param dimensionId Dimension id of the star-node
+     * @return iterator for star-tree documents of star-node
+     * @throws IOException throws when unable to generate star-tree for star-node
+     */
+    @Override
+    public Iterator<StarTreeDocument> generateStarTreeDocumentsForStarNode(int startDocId, int endDocId, int dimensionId)
+        throws IOException {
+        // Sort all documents using an int array
+        int numDocs = endDocId - startDocId;
+        int[] sortedDocIds = new int[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            sortedDocIds[i] = startDocId + i;
+        }
+        StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, index -> {
+            try {
+                return starTreeDocumentFileManager.readDimensions(sortedDocIds[index]);
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        });
+        // Create an iterator for aggregated documents
+        return new Iterator<StarTreeDocument>() {
+            boolean hasNext = true;
+            StarTreeDocument currentDocument = getStarTreeDocument(sortedDocIds[0]);
+            int docId = 1;
+
+            private boolean hasSameDimensions(StarTreeDocument document1, StarTreeDocument document2) {
+                for (int i = dimensionId + 1; i < starTreeField.getDimensionsOrder().size(); i++) {
+                    if (!Objects.equals(document1.dimensions[i], document2.dimensions[i])) {
+                        return false;
+                    }
+                }
+                return true;
+            }
+
+            @Override
+            public boolean hasNext() {
+                return hasNext;
+            }
+
+            @Override
+            public StarTreeDocument next() {
+                StarTreeDocument next = reduceStarTreeDocuments(null, currentDocument);
+                next.dimensions[dimensionId] = STAR_IN_DOC_VALUES_INDEX;
+                while (docId < numDocs) {
+                    StarTreeDocument document;
+                    try {
+                        document = getStarTreeDocument(sortedDocIds[docId++]);
+                    } catch (IOException e) {
+                        throw new RuntimeException(e);
+                    }
+                    if (!hasSameDimensions(document, currentDocument)) {
+                        currentDocument = document;
+                        return next;
+                    } else {
+                        next = reduceStarTreeDocuments(next, document);
+                    }
+                }
+                hasNext = false;
+                return next;
+            }
+        };
+    }
+
+    @Override
+    public void close() throws IOException {
+        IOUtils.closeWhileHandlingException(starTreeDocumentFileManager, segmentDocumentFileManager);
+        super.close();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java
index 1599be2e76a56..8ff111d3b41d9 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java
@@ -127,34 +127,12 @@ StarTreeDocument[] getSegmentsStarTreeDocuments(List starTreeVal
             metricReaders.add(new SequentialDocValuesIterator(metricDocValuesEntry.getValue()));
         }
 
-        boolean endOfDoc = false;
         int currentDocId = 0;
         int numSegmentDocs = Integer.parseInt(
             starTreeValues.getAttributes().getOrDefault(NUM_SEGMENT_DOCS, String.valueOf(DocIdSetIterator.NO_MORE_DOCS))
         );
         while (currentDocId < numSegmentDocs) {
-            Long[] dims = new Long[dimensionsSplitOrder.size()];
-            int i = 0;
-            for (SequentialDocValuesIterator dimensionDocValueIterator : dimensionReaders) {
-                dimensionDocValueIterator.nextDoc(currentDocId);
-                Long val = dimensionDocValueIterator.value(currentDocId);
-                dims[i] = val;
-                i++;
-            }
-            i
= 0; - Object[] metrics = new Object[metricReaders.size()]; - for (SequentialDocValuesIterator metricDocValuesIterator : metricReaders) { - metricDocValuesIterator.nextDoc(currentDocId); - // As part of merge, we traverse the star tree doc values - // The type of data stored in metric fields is different from the - // actual indexing field they're based on - metrics[i] = metricAggregatorInfos.get(i) - .getValueAggregators() - .toStarTreeNumericTypeValue(metricDocValuesIterator.value(currentDocId)); - i++; - } - StarTreeDocument starTreeDocument = new StarTreeDocument(dims, metrics); - starTreeDocuments.add(starTreeDocument); + starTreeDocuments.add(getStarTreeDocument(currentDocId, dimensionReaders, metricReaders)); currentDocId++; } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/SegmentDocsFileManager.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/SegmentDocsFileManager.java new file mode 100644 index 0000000000000..fe94df57d9535 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/SegmentDocsFileManager.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +/** + * Class for managing segment documents file. + * Segment documents are stored in a single file named 'segment.documents' for sorting and aggregation. A document ID array is created, + * and the document IDs in the array are swapped during sorting based on the actual segment document values in the file. 
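+ * <p>
+ * As an illustrative sketch (not code from this change), only the ID array is permuted while the file bytes stay fixed:
+ * <pre>{@code
+ * // hypothetical swap used during the off-heap sort: exchange doc IDs, never documents
+ * int tmp = docIds[i];
+ * docIds[i] = docIds[j];
+ * docIds[j] = tmp;
+ * // ordering decisions read the dimensions via readDimensions(docIds[k]) from the file
+ * }</pre>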
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class SegmentDocsFileManager extends AbstractDocumentsFileManager implements Closeable { + + private static final Logger logger = LogManager.getLogger(SegmentDocsFileManager.class); + private static final String SEGMENT_DOC_FILE_NAME = "segment.documents"; + private IndexInput segmentDocsFileInput; + private RandomAccessInput segmentRandomInput; + final IndexOutput segmentDocsFileOutput; + + public SegmentDocsFileManager(SegmentWriteState state, StarTreeField starTreeField, List metricAggregatorInfos) + throws IOException { + super(state, starTreeField, metricAggregatorInfos); + try { + segmentDocsFileOutput = tmpDirectory.createTempOutput(SEGMENT_DOC_FILE_NAME, state.segmentSuffix, state.context); + } catch (IOException e) { + IOUtils.closeWhileHandlingException(this); + throw e; + } + } + + @Override + public void writeStarTreeDocument(StarTreeDocument starTreeDocument, boolean isAggregatedDoc) throws IOException { + writeStarTreeDocument(starTreeDocument, segmentDocsFileOutput, isAggregatedDoc); + } + + private void maybeInitializeSegmentInput() throws IOException { + try { + if (segmentDocsFileInput == null) { + IOUtils.closeWhileHandlingException(segmentDocsFileOutput); + segmentDocsFileInput = tmpDirectory.openInput(segmentDocsFileOutput.getName(), state.context); + segmentRandomInput = segmentDocsFileInput.randomAccessSlice(0, segmentDocsFileInput.length()); + } + } catch (IOException e) { + IOUtils.closeWhileHandlingException(this); + throw e; + } + } + + @Override + public StarTreeDocument readStarTreeDocument(int docId, boolean isAggregatedDoc) throws IOException { + maybeInitializeSegmentInput(); + return readStarTreeDocument(segmentRandomInput, (long) docId * docSizeInBytes, isAggregatedDoc); + } + + @Override + public Long[] readDimensions(int docId) throws IOException { + maybeInitializeSegmentInput(); + Long[] dims = new Long[starTreeField.getDimensionsOrder().size()]; + readDimensions(dims, segmentRandomInput, (long) docId * docSizeInBytes); + return dims; + } + + @Override + public Long getDimensionValue(int docId, int dimensionId) throws IOException { + Long[] dims = readDimensions(docId); + return dims[dimensionId]; + } + + @Override + public void close() throws IOException { + try { + if (this.segmentDocsFileOutput != null) { + IOUtils.closeWhileHandlingException(segmentDocsFileOutput); + tmpDirectory.deleteFile(segmentDocsFileOutput.getName()); + } + } finally { + IOUtils.closeWhileHandlingException(segmentDocsFileInput, segmentDocsFileOutput); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java new file mode 100644 index 0000000000000..779ed77b0540a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java @@ -0,0 +1,294 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.compositeindex.datacube.startree.builder;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RandomAccessInput;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeField;
+import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Star tree document file manager.
+ * This class manages all the temporary files associated with the off heap star tree builder.
+ * <p>
+ * Star tree documents are stored in multiple 'star-tree.documents' files. The algorithm works as follows:
+ * <ol>
+ *     <li>Initially, aggregated documents are created based on the segment documents.</li>
+ *     <li>Further, star tree documents are generated (e.g., in the {@code generateStarTreeDocumentsForStarNode} method) by reading
+ *     the current aggregated documents and creating new aggregated star tree documents, which are appended to the
+ *     'star-tree.documents' files.</li>
+ *     <li>This process is repeated until all combinations of star tree documents are generated.</li>
+ * </ol>
+ * <p>
+ * In cases where previously written star tree documents need to be read from the 'star-tree.documents' files, the current
+ * 'star-tree.documents' file is closed, and the values are read. Then, the derived values get appended to a new
+ * 'star-tree.documents' file. This is necessary because Lucene maintains immutability of data, and an {@code IndexOutput} cannot be
+ * kept open while creating an {@code IndexInput} on the same file, as all file contents may not be visible in the reader. Therefore,
+ * the {@code IndexOutput} must be closed to ensure all data can be read before creating an {@code IndexInput}. Additionally, an
+ * {@code IndexOutput} cannot be reopened, so a new file is created for the new star tree documents.
+ * <p>
The set of 'star-tree.documents' files is maintained, and a tracker array is used to keep track of the start document ID for each file. + * Once the number of files reaches a set threshold, the files are merged. + * + */ +public class StarTreeDocsFileManager extends AbstractDocumentsFileManager implements Closeable { + private static final Logger logger = LogManager.getLogger(StarTreeDocsFileManager.class); + private static final String STAR_TREE_DOC_FILE_NAME = "star-tree.documents"; + public static final int DEFAULT_FILE_COUNT_MERGE_THRESHOLD = 5; + private IndexInput starTreeDocsFileInput; + private RandomAccessInput starTreeDocsFileRandomInput; + private IndexOutput starTreeDocsFileOutput; + private final Map fileToEndDocIdMap; + private final List starTreeDocumentOffsets = new ArrayList<>(); + private int currentFileStartDocId; + private int numReadableStarTreeDocuments; + private int starTreeFileCount = -1; + private int currBytes = 0; + private final int fileCountMergeThreshold; + private int numStarTreeDocs = 0; + + public StarTreeDocsFileManager(SegmentWriteState state, StarTreeField starTreeField, List metricAggregatorInfos) + throws IOException { + this(state, starTreeField, metricAggregatorInfos, DEFAULT_FILE_COUNT_MERGE_THRESHOLD); + } + + public StarTreeDocsFileManager( + SegmentWriteState state, + StarTreeField starTreeField, + List metricAggregatorInfos, + int fileCountThreshold + ) throws IOException { + super(state, starTreeField, metricAggregatorInfos); + fileToEndDocIdMap = new LinkedHashMap<>(); + try { + starTreeDocsFileOutput = createStarTreeDocumentsFileOutput(); + } catch (IOException e) { + IOUtils.closeWhileHandlingException(starTreeDocsFileOutput); + IOUtils.closeWhileHandlingException(this); + throw e; + } + fileCountMergeThreshold = fileCountThreshold; + } + + /** + * Creates a new star tree document temporary file to store star tree documents. 
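+     * Each call increments {@code starTreeFileCount} and appends it to the base name, so successive temporary files get distinct prefixes.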
+     */
+    IndexOutput createStarTreeDocumentsFileOutput() throws IOException {
+        starTreeFileCount++;
+        return tmpDirectory.createTempOutput(STAR_TREE_DOC_FILE_NAME + starTreeFileCount, state.segmentSuffix, state.context);
+    }
+
+    @Override
+    public void writeStarTreeDocument(StarTreeDocument starTreeDocument, boolean isAggregatedDoc) throws IOException {
+        assert isAggregatedDoc;
+        int numBytes = writeStarTreeDocument(starTreeDocument, starTreeDocsFileOutput, true);
+        addStarTreeDocumentOffset(numBytes);
+        numStarTreeDocs++;
+    }
+
+    @Override
+    public StarTreeDocument readStarTreeDocument(int docId, boolean isAggregatedDoc) throws IOException {
+        assert isAggregatedDoc;
+        ensureDocumentReadable(docId);
+        return readStarTreeDocument(starTreeDocsFileRandomInput, starTreeDocumentOffsets.get(docId), true);
+    }
+
+    @Override
+    public Long getDimensionValue(int docId, int dimensionId) throws IOException {
+        Long[] dims = readDimensions(docId);
+        return dims[dimensionId];
+    }
+
+    @Override
+    public Long[] readDimensions(int docId) throws IOException {
+        ensureDocumentReadable(docId);
+        Long[] dims = new Long[starTreeField.getDimensionsOrder().size()];
+        readDimensions(dims, starTreeDocsFileRandomInput, starTreeDocumentOffsets.get(docId));
+        return dims;
+    }
+
+    private void addStarTreeDocumentOffset(int bytes) {
+        starTreeDocumentOffsets.add(currBytes);
+        currBytes += bytes;
+        if (docSizeInBytes == -1) {
+            docSizeInBytes = bytes;
+        }
+        assert docSizeInBytes == bytes;
+    }
+
+    /**
+     * Loads the correct star-tree.documents file based on the docId
+     */
+    private void ensureDocumentReadable(int docId) throws IOException {
+        ensureDocumentReadable(docId, true);
+    }
+
+    /**
+     * Loads the correct star-tree.documents file based on the docId.
+     * "currentFileStartDocId" and "numReadableStarTreeDocuments" track the "start doc id" and "end doc id + 1"
+     * of the range in the currently open 'star-tree.documents' file
+     */
+    private void ensureDocumentReadable(int docId, boolean shouldCreateFileOutput) throws IOException {
+        try {
+            if (docId >= currentFileStartDocId && docId < numReadableStarTreeDocuments) {
+                return;
+            }
+            IOUtils.closeWhileHandlingException(starTreeDocsFileInput);
+            starTreeDocsFileInput = null;
+            if (docId < numStarTreeDocs) {
+                loadStarTreeDocumentFile(docId);
+            }
+            if (starTreeDocsFileInput != null) {
+                return;
+            }
+            closeAndMaybeCreateNewFile(shouldCreateFileOutput, numStarTreeDocs);
+            loadStarTreeDocumentFile(docId);
+        } catch (IOException ex) {
+            IOUtils.closeWhileHandlingException(this);
+            throw ex;
+        }
+    }
+
+    /**
+     * The fileToEndDocIdMap is in the following format:
+     * file1 == 521 [ contains docs from 0 to 520 ]
+     * file2 == 780 [ contains docs from 521 to 779 ]
+     * <p>
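+     * For example, with the mapping above, docId 600 resolves to file2, leaving currentFileStartDocId = 521 and
+     * numReadableStarTreeDocuments = 780.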
+ * This method loads the correct 'star-tree.documents' file based on the docId + * and updates the "currentFileStartDocId" and "numReadableStarTreeDocuments" + */ + private void loadStarTreeDocumentFile(int docId) throws IOException { + int currentFileStartDocId = 0; + for (Map.Entry entry : fileToEndDocIdMap.entrySet()) { + if (docId < entry.getValue()) { + starTreeDocsFileInput = tmpDirectory.openInput(entry.getKey(), state.context); + starTreeDocsFileRandomInput = starTreeDocsFileInput.randomAccessSlice( + starTreeDocsFileInput.getFilePointer(), + starTreeDocsFileInput.length() - starTreeDocsFileInput.getFilePointer() + ); + numReadableStarTreeDocuments = entry.getValue(); + break; + } + currentFileStartDocId = entry.getValue(); + } + this.currentFileStartDocId = currentFileStartDocId; + } + + /** + * This case handles when the requested document ID is beyond the range of the currently open 'star-tree.documents' file. + * In this scenario, the following steps are taken: + *
+ * <p>
+ * 1. Close the current 'star-tree.documents' file. + * 2. Create a new 'star-tree.documents' file if the operation involves appending new documents. + * If the operation is only for reading existing documents, a new file is not created. + */ + private void closeAndMaybeCreateNewFile(boolean shouldCreateFileForAppend, int numStarTreeDocs) throws IOException { + currBytes = 0; + if (starTreeDocsFileOutput != null) { + fileToEndDocIdMap.put(starTreeDocsFileOutput.getName(), numStarTreeDocs); + IOUtils.close(starTreeDocsFileOutput); + } + if (shouldCreateFileForAppend) { + starTreeDocsFileOutput = createStarTreeDocumentsFileOutput(); + if (fileToEndDocIdMap.size() >= fileCountMergeThreshold) { + mergeFiles(numStarTreeDocs); + } + } + if (starTreeDocsFileRandomInput != null) { + starTreeDocsFileRandomInput = null; + } + } + + /** + * Merge temporary star tree files once the number of files reach threshold + */ + private void mergeFiles(int numStarTreeDocs) throws IOException { + long st = System.currentTimeMillis(); + try (IndexOutput mergedOutput = createStarTreeDocumentsFileOutput()) { + long mergeBytes = mergeFilesToOutput(mergedOutput); + logger.debug( + "Created merge file : {} in : {} ms with size of : {} KB", + starTreeDocsFileOutput.getName(), + System.currentTimeMillis() - st, + mergeBytes / 1024 + ); + + deleteOldFiles(); + fileToEndDocIdMap.clear(); + fileToEndDocIdMap.put(mergedOutput.getName(), numStarTreeDocs); + resetStarTreeDocumentOffsets(); + } + } + + /** + * Merge all files to single IndexOutput + */ + private long mergeFilesToOutput(IndexOutput mergedOutput) throws IOException { + long mergeBytes = 0L; + for (Map.Entry entry : fileToEndDocIdMap.entrySet()) { + IndexInput input = tmpDirectory.openInput(entry.getKey(), state.context); + mergedOutput.copyBytes(input, input.length()); + mergeBytes += input.length(); + input.close(); + } + return mergeBytes; + } + + /** + * Delete the old star-tree.documents files + */ + private void deleteOldFiles() throws IOException { + for (String fileName : fileToEndDocIdMap.keySet()) { + tmpDirectory.deleteFile(fileName); + } + } + + /** + * Reset the star tree document offsets based on the merged file + */ + private void resetStarTreeDocumentOffsets() { + int curr = 0; + for (int i = 0; i < starTreeDocumentOffsets.size(); i++) { + starTreeDocumentOffsets.set(i, curr); + curr += docSizeInBytes; + } + } + + @Override + public void close() { + try { + if (starTreeDocsFileOutput != null) { + IOUtils.closeWhileHandlingException(starTreeDocsFileOutput); + try { + tmpDirectory.deleteFile(starTreeDocsFileOutput.getName()); + } catch (IOException ignored) {} // similar to IOUtils.deleteFilesIgnoringExceptions + } + } finally { + IOUtils.closeWhileHandlingException(starTreeDocsFileInput, starTreeDocsFileOutput); + } + // Delete all temporary star tree document files + for (String file : fileToEndDocIdMap.keySet()) { + try { + tmpDirectory.deleteFile(file); + } catch (IOException ignored) {} // similar to IOUtils.deleteFilesIgnoringExceptions + } + starTreeDocumentOffsets.clear(); + fileToEndDocIdMap.clear(); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java index 6c3d476aa3a55..3b376d7c34351 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java +++ 
b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilder.java @@ -75,7 +75,7 @@ public void build(Map fieldProducerMap) throws IOExce // Build all star-trees for (StarTreeField starTreeField : starTreeFields) { - try (StarTreeBuilder starTreeBuilder = getSingleTreeBuilder(starTreeField, state, mapperService)) { + try (StarTreeBuilder starTreeBuilder = getStarTreeBuilder(starTreeField, state, mapperService)) { starTreeBuilder.build(fieldProducerMap); } } @@ -102,9 +102,9 @@ public void buildDuringMerge(final Map> starTreeVal continue; } StarTreeField starTreeField = starTreeValuesList.get(0).getStarTreeField(); - StarTreeBuilder builder = getSingleTreeBuilder(starTreeField, state, mapperService); - builder.build(starTreeValuesList); - builder.close(); + try (StarTreeBuilder builder = getStarTreeBuilder(starTreeField, state, mapperService)) { + builder.build(starTreeValuesList); + } } logger.debug( "Took {} ms to merge {} star-trees with star-tree fields", @@ -116,14 +116,13 @@ public void buildDuringMerge(final Map> starTreeVal /** * Get star-tree builder based on build mode. */ - StarTreeBuilder getSingleTreeBuilder(StarTreeField starTreeField, SegmentWriteState state, MapperService mapperService) + StarTreeBuilder getStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState state, MapperService mapperService) throws IOException { switch (starTreeField.getStarTreeConfig().getBuildMode()) { case ON_HEAP: return new OnHeapStarTreeBuilder(starTreeField, state, mapperService); case OFF_HEAP: - // TODO - // return new OffHeapStarTreeBuilder(starTreeField, state, mapperService); + return new OffHeapStarTreeBuilder(starTreeField, state, mapperService); default: throw new IllegalArgumentException( String.format( diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtil.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtil.java new file mode 100644 index 0000000000000..a508e497adcdf --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtil.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; +import org.opensearch.common.util.ByteArrayBackedBitset; + +import java.io.IOException; +import java.util.function.Function; + +/** + * Helper class to read/write bitset for null values and identity values. + */ +public class StarTreeDocumentBitSetUtil { + /** + * Write bitset for null values. + * + * @param array array of objects + * @param output output stream + * @return number of bytes written + * @throws IOException if an I/O error occurs while writing to the output stream + */ + public static int writeBitSet(Object[] array, IndexOutput output) throws IOException { + ByteArrayBackedBitset bitset = new ByteArrayBackedBitset(getLength(array)); + for (int i = 0; i < array.length; i++) { + if (array[i] == null) { + bitset.set(i); + } + } + return bitset.write(output); + } + + /** + * Set identity values based on bitset. 
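+ * As a hedged illustration with made-up values: {@code writeBitSet} on {@code [1L, null, 3L]} records a single byte with bit 1 set,
+ * and reading it back with an identity supplier of {@code i -> 0L} fills that position, yielding {@code [1L, 0L, 3L]}.
+ *
+ * @param input the random-access input positioned at a previously written bitset
+ * @param offset byte offset of the bitset within {@code input}
+ * @param array array whose bit-set positions are filled with identity values
+ * @param identityValueSupplier supplies the identity value for a given array index
+ * @return number of bytes read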
+ */ + public static int readBitSet(RandomAccessInput input, long offset, Object[] array, Function identityValueSupplier) + throws IOException { + ByteArrayBackedBitset bitset = new ByteArrayBackedBitset(input, offset, getLength(array)); + for (int i = 0; i < array.length; i++) { + if (bitset.get(i)) { + array[i] = identityValueSupplier.apply(i); + } + } + return bitset.getCurrBytesRead(); + } + + private static int getLength(Object[] array) { + return (array.length / 8) + (array.length % 8 == 0 ? 0 : 1); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java new file mode 100644 index 0000000000000..7b1c63bc611ee --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.apache.lucene.util.IntroSorter; + +import java.util.Objects; +import java.util.function.IntFunction; + +/** + * Utility class for building star tree + */ +public class StarTreeDocumentsSorter { + /** + * Sort documents based on the dimension values off heap using intro sorter. + */ + public static void sort( + final int[] sortedDocIds, + final int dimensionId, + final int numDocs, + final IntFunction dimensionsReader + ) { + new IntroSorter() { + private Long[] dimensions; + + @Override + protected void swap(int i, int j) { + int temp = sortedDocIds[i]; + sortedDocIds[i] = sortedDocIds[j]; + sortedDocIds[j] = temp; + } + + @Override + protected void setPivot(int i) { + dimensions = dimensionsReader.apply(i); + } + + @Override + protected int comparePivot(int j) { + Long[] currentDimensions = dimensionsReader.apply(j); + for (int i = dimensionId + 1; i < dimensions.length; i++) { + Long dimension = currentDimensions[i]; + if (!Objects.equals(dimensions[i], dimension)) { + if (dimensions[i] == null && dimension == null) { + return 0; + } + if (dimension == null) { + return -1; + } + if (dimensions[i] == null) { + return 1; + } + return Long.compare(dimensions[i], dimension); + } + } + return 0; + } + }.sort(0, numDocs); + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java index 5cf737c61ab2d..a5d59a2602633 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/TreeNode.java @@ -62,4 +62,8 @@ public class TreeNode { * A map containing the child nodes of this star-tree node, keyed by their dimension id. 
*/ public Map children; + + public long getDimensionValue() { + return dimensionValue; + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index d2debe762e9be..d9539f9dc0c82 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -84,8 +84,7 @@ public static class Builder extends ParametrizedFieldMapper.Builder { List.of(XContentMapValues.nodeStringArrayValue(paramMap.getOrDefault(SKIP_STAR_NODE_IN_DIMS, new ArrayList()))) ); paramMap.remove(SKIP_STAR_NODE_IN_DIMS); - // TODO : change this to off heap once off heap gets implemented - StarTreeFieldConfiguration.StarTreeBuildMode buildMode = StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP; + StarTreeFieldConfiguration.StarTreeBuildMode buildMode = StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP; List dimensions = buildDimensions(name, paramMap, context); paramMap.remove(ORDERED_DIMENSIONS); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java index 76a7875919a8b..131d7444ff91c 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java @@ -55,6 +55,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -355,19 +356,21 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx } } - public void test_sortAndAggregateStarTreeDocuments_nullDimensionField() throws IOException { + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/14813") + public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionField() throws IOException { int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 12.0, null, randomDouble() }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble() }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble() }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble() }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble() }); starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 9.0, null, randomDouble() }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble() }); List inorderStarTreeDocuments = List.of( new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 21.0, 0.0, 2L }), - new 
StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L }) + new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 24.0, 0.0, 2L }), + new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Object[] { 11.0, 0.0, 1L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -388,8 +391,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionField() throws I metricsIterators ); - while (segmentStarTreeDocumentIterator.hasNext() && expectedStarTreeDocumentIterator.hasNext()) { - StarTreeDocument resultStarTreeDocument = segmentStarTreeDocumentIterator.next(); + for (StarTreeDocument resultStarTreeDocument : builder.getStarTreeDocuments()) { StarTreeDocument expectedStarTreeDocument = expectedStarTreeDocumentIterator.next(); assertEquals(expectedStarTreeDocument.dimensions[0], resultStarTreeDocument.dimensions[0]); assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); @@ -399,6 +401,8 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionField() throws I assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); } + builder.build(segmentStarTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); } public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics() throws IOException { @@ -411,7 +415,9 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null }); starTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null }); - List inorderStarTreeDocuments = List.of(); + List inorderStarTreeDocuments = List.of( + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 5L }) + ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; @@ -446,6 +452,8 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); } + builder.build(segmentStarTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); } public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOException { @@ -595,6 +603,8 @@ public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() } assertEquals(inorderStarTreeDocuments.size(), numOfAggregatedDocuments); + builder.build(segmentStarTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 3, 1, builder.getStarTreeDocuments()); } @@ -671,6 +681,7 @@ public void test_build_halfFloatMetrics() throws IOException { Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIterator(); assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + builder.build(expectedStarTreeDocumentIterator); } public void test_build_floatMetrics() throws IOException { @@ -975,6 +986,7 @@ public void test_build_starTreeDataset() throws IOException { assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); 
assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); } + validateStarTree(builder.getRootNode(), 3, 1, builder.getStarTreeDocuments()); } private static Map> getExpectedDimToValueMap() { @@ -1055,7 +1067,7 @@ public void testFlushFlow() throws IOException { SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList, metricsWithField); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(6), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(6), mapperService); SequentialDocValuesIterator[] dimDvs = { new SequentialDocValuesIterator(d1sndv), new SequentialDocValuesIterator(d2sndv) }; Iterator starTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( dimDvs, @@ -1081,6 +1093,62 @@ public void testFlushFlow() throws IOException { assertEquals(1L, starTreeDocument.metrics[1]); } assertEquals(6, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); + } + + public void testFlushFlowDimsReverse() throws IOException { + List dimList = List.of(5L, 4L, 3L, 2L, 1L); + List docsWithField = List.of(0, 1, 2, 3, 4); + List dimList2 = List.of(5L, 4L, 3L, 2L, 1L, 0L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5); + + List metricsList = List.of( + getLongFromDouble(50.0), + getLongFromDouble(40.0), + getLongFromDouble(30.0), + getLongFromDouble(20.0), + getLongFromDouble(10.0), + getLongFromDouble(0.0) + ); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5); + + StarTreeField sf = getStarTreeFieldWithMultipleMetrics(); + SortedNumericDocValues d1sndv = getSortedNumericMock(dimList, docsWithField); + SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2); + SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList, metricsWithField); + + builder = getStarTreeBuilder(sf, getWriteState(6), mapperService); + SequentialDocValuesIterator[] dimDvs = { new SequentialDocValuesIterator(d1sndv), new SequentialDocValuesIterator(d2sndv) }; + Iterator starTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimDvs, + List.of(new SequentialDocValuesIterator(m1sndv), new SequentialDocValuesIterator(m2sndv)) + ); + /** + * Asserting following dim / metrics [ dim1, dim2 / Sum [metric], count [metric] ] + [1, 1] | [10.0, 1] + [2, 2] | [20.0, 1] + [3, 3] | [30.0, 1] + [4, 4] | [40.0, 1] + [5, 5] | [50.0, 1] + [null, 0] | [0.0, 1] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + if (starTreeDocument.dimensions[0] != null) { + assertEquals(count, (long) starTreeDocument.dimensions[0]); + } else { + assertEquals(6, count); + } + assertEquals(starTreeDocument.dimensions[1] * 10.0, starTreeDocument.metrics[0]); + assertEquals(1L, starTreeDocument.metrics[1]); + } + assertEquals(6, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } public void testFlushFlowBuild() throws IOException { @@ -1120,7 +1188,7 @@ public void testFlushFlowBuild() throws IOException { SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2); SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); - BaseStarTreeBuilder builder = 
getStarTreeBuilder(sf, getWriteState(100), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(100), mapperService); DocValuesProducer d1vp = getDocValuesProducer(d1sndv); DocValuesProducer d2vp = getDocValuesProducer(d2sndv); @@ -1147,7 +1215,7 @@ public void testFlushFlowBuild() throws IOException { starTreeDocument.metrics[0] ); } - builder.close(); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } private static DocValuesProducer getDocValuesProducer(SortedNumericDocValues sndv) { @@ -1209,7 +1277,7 @@ public void testMergeFlowWithSum() throws IOException { sf, "6" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(6), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(6), mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Sum [ metric] ] @@ -1232,6 +1300,8 @@ public void testMergeFlowWithSum() throws IOException { ); } assertEquals(6, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } public void testMergeFlowWithCount() throws IOException { @@ -1259,7 +1329,7 @@ public void testMergeFlowWithCount() throws IOException { sf, "6" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(6), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(6), mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] @@ -1279,6 +1349,9 @@ public void testMergeFlowWithCount() throws IOException { assertEquals(starTreeDocument.dimensions[0] != null ? 
starTreeDocument.dimensions[0] * 2 : 4, starTreeDocument.metrics[0]); } assertEquals(6, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); + } private StarTreeValues getStarTreeValues( @@ -1336,7 +1409,7 @@ public void testMergeFlowWithDifferentDocsFromSegments() throws IOException { sf, "4" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(4), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(4), mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] @@ -1361,6 +1434,68 @@ public void testMergeFlowWithDifferentDocsFromSegments() throws IOException { } } assertEquals(9, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); + } + + public void testMergeFlowNumSegmentsDocs() throws IOException { + List dimList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L, -1L, -1L, -1L); + List docsWithField = List.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L, -1L, -1L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L, -1L, -1L, -1L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + + List dimList3 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField3 = List.of(0, 1, 2, 3, 4); + List dimList4 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + + List metricsList2 = List.of(5L, 6L, 7L, 8L, 9L); + List metricsWithField2 = List.of(0, 1, 2, 3, 4); + + StarTreeField sf = getStarTreeField(MetricStat.COUNT); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList3, docsWithField3), + getSortedNumericMock(dimList4, docsWithField4), + getSortedNumericMock(metricsList2, metricsWithField2), + sf, + "4" + ); + builder = getStarTreeBuilder(sf, getWriteState(4), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] + [0, 0] | [0] + [1, 1] | [1] + [2, 2] | [2] + [3, 3] | [3] + [4, 4] | [4] + [5, 5] | [10] + [6, 6] | [6] + [7, 7] | [7] + [8, 8] | [8] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + if (Objects.equals(starTreeDocument.dimensions[0], 5L)) { + assertEquals(starTreeDocument.dimensions[0] * 2, starTreeDocument.metrics[0]); + } else { + assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); + } + } + assertEquals(9, count); } public void testMergeFlowWithMissingDocs() throws IOException { @@ -1396,7 +1531,7 @@ public void testMergeFlowWithMissingDocs() throws IOException { sf, "4" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(4), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(4), mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Count [ 
metric] ] @@ -1421,6 +1556,138 @@ public void testMergeFlowWithMissingDocs() throws IOException { assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); } assertEquals(10, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); + } + + public void testMergeFlowWithMissingDocsWithZero() throws IOException { + List dimList = List.of(0L, 0L, 0L, 0L); + List docsWithField = List.of(0, 1, 2, 6); + List dimList2 = List.of(0L, 0L, 0L, 0L); + List docsWithField2 = List.of(0, 1, 2, 6); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + List dimList3 = List.of(5L, 6L, 8L, -1L); + List docsWithField3 = List.of(0, 1, 3, 4); + List dimList4 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + + List metricsList2 = List.of(5L, 6L, 7L, 8L, 9L); + List metricsWithField2 = List.of(0, 1, 2, 3, 4); + + StarTreeField sf = getStarTreeField(MetricStat.COUNT); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + sf, + "7" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList3, docsWithField3), + getSortedNumericMock(dimList4, docsWithField4), + getSortedNumericMock(metricsList2, metricsWithField2), + sf, + "4" + ); + builder = getStarTreeBuilder(sf, getWriteState(4), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] + [0, 0] | [9] + [5, 5] | [5] + [6, 6] | [6] + [8, 8] | [8] + [null, 7] | [7] + [null, null] | [12] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + if (starTreeDocument.dimensions[0] == null && starTreeDocument.dimensions[1] == null) { + assertEquals(12L, (long) starTreeDocument.metrics[0]); + } else if (starTreeDocument.dimensions[0] == null) { + assertEquals(7L, starTreeDocument.metrics[0]); + } else if (starTreeDocument.dimensions[0] == 0) { + assertEquals(9L, starTreeDocument.metrics[0]); + } else { + assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); + } + } + assertEquals(6, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); + } + + public void testMergeFlowWithMissingDocsWithZeroComplexCase() throws IOException { + List dimList = List.of(0L, 0L, 0L, 0L, 0L); + List docsWithField = List.of(0, 1, 2, 6, 8); + List dimList2 = List.of(0L, 0L, 0L, 0L); + List docsWithField2 = List.of(0, 1, 2, 6); + + List metricsList = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + + List dimList3 = List.of(5L, 6L, 8L, -1L); + List docsWithField3 = List.of(0, 1, 3, 4); + List dimList4 = List.of(5L, 6L, 7L, 8L, -1L); + List docsWithField4 = List.of(0, 1, 2, 3, 4); + + List metricsList2 = List.of(5L, 6L, 7L, 8L, 9L); + List metricsWithField2 = List.of(0, 1, 2, 3, 4); + + StarTreeField sf = getStarTreeField(MetricStat.COUNT); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, 
metricsWithField), + sf, + "9" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList3, docsWithField3), + getSortedNumericMock(dimList4, docsWithField4), + getSortedNumericMock(metricsList2, metricsWithField2), + sf, + "4" + ); + builder = getStarTreeBuilder(sf, getWriteState(4), mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] + [0, 0] | [9] + [0, null] | [8] + [5, 5] | [5] + [6, 6] | [6] + [8, 8] | [8] + [null, 7] | [7] + [null, null] | [19] + */ + int count = 0; + while (starTreeDocumentIterator.hasNext()) { + count++; + StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + if (starTreeDocument.dimensions[0] == null && starTreeDocument.dimensions[1] == null) { + assertEquals(19L, (long) starTreeDocument.metrics[0]); + assertEquals(7, count); + } else if (starTreeDocument.dimensions[0] == null) { + assertEquals(7L, starTreeDocument.metrics[0]); + } else if (starTreeDocument.dimensions[1] == null) { + assertEquals(8L, starTreeDocument.metrics[0]); + } else if (starTreeDocument.dimensions[0] == 0) { + assertEquals(9L, starTreeDocument.metrics[0]); + } else { + assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); + } + } + assertEquals(7, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } public void testMergeFlowWithMissingDocsInSecondDim() throws IOException { @@ -1456,7 +1723,7 @@ public void testMergeFlowWithMissingDocsInSecondDim() throws IOException { sf, "4" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(4), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(4), mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] @@ -1482,6 +1749,8 @@ public void testMergeFlowWithMissingDocsInSecondDim() throws IOException { } } assertEquals(10, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } public void testMergeFlowWithDocsMissingAtTheEnd() throws IOException { @@ -1517,7 +1786,7 @@ public void testMergeFlowWithDocsMissingAtTheEnd() throws IOException { sf, "4" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService); + builder = getStarTreeBuilder(sf, writeState, mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] @@ -1542,6 +1811,8 @@ public void testMergeFlowWithDocsMissingAtTheEnd() throws IOException { assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); } assertEquals(10, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } public void testMergeFlowWithEmptyFieldsInOneSegment() throws IOException { @@ -1569,7 +1840,7 @@ public void testMergeFlowWithEmptyFieldsInOneSegment() throws IOException { sf, "0" ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, getWriteState(0), mapperService); + builder = getStarTreeBuilder(sf, getWriteState(0), mapperService); Iterator starTreeDocumentIterator = 
builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** * Asserting following dim / metrics [ dim1, dim2 / Count [ metric] ] @@ -1590,6 +1861,8 @@ public void testMergeFlowWithEmptyFieldsInOneSegment() throws IOException { assertEquals(starTreeDocument.dimensions[1], starTreeDocument.metrics[0]); } assertEquals(6, count); + builder.build(starTreeDocumentIterator); + validateStarTree(builder.getRootNode(), 2, 1, builder.getStarTreeDocuments()); } public void testMergeFlowWithDuplicateDimensionValues() throws IOException { @@ -1664,8 +1937,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { metricsWithField, sf ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService); - builder.build(List.of(starTreeValues, starTreeValues2)); + builder = getStarTreeBuilder(sf, writeState, mapperService); + builder.build(builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2))); List starTreeDocuments = builder.getStarTreeDocuments(); assertEquals(401, starTreeDocuments.size()); int count = 0; @@ -1693,7 +1966,7 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { count++; } assertEquals(401, count); - builder.close(); + validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } public void testMergeFlowWithMaxLeafDocs() throws IOException { @@ -1774,8 +2047,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { sf ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService); - builder.build(List.of(starTreeValues, starTreeValues2)); + builder = getStarTreeBuilder(sf, writeState, mapperService); + builder.build(builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2))); List starTreeDocuments = builder.getStarTreeDocuments(); /** 635 docs get generated @@ -1790,7 +2063,7 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { [null, null, null, null] | [2495000.0] */ assertEquals(635, starTreeDocuments.size()); - builder.close(); + validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } private StarTreeValues getStarTreeValues( @@ -1892,11 +2165,11 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE metricsWithField, sf ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService); - builder.build(List.of(starTreeValues, starTreeValues2)); + builder = getStarTreeBuilder(sf, writeState, mapperService); + builder.build(builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2))); List starTreeDocuments = builder.getStarTreeDocuments(); assertEquals(401, starTreeDocuments.size()); - builder.close(); + validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } public static long getLongFromDouble(double value) { @@ -1991,8 +2264,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc metricsWithField, sf ); - OnHeapStarTreeBuilder builder = new OnHeapStarTreeBuilder(sf, writeState, mapperService); - builder.build(List.of(starTreeValues, starTreeValues2)); + builder = getStarTreeBuilder(sf, writeState, mapperService); + builder.build(builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2))); List starTreeDocuments = builder.getStarTreeDocuments(); Map> dimValueToDocIdMap = new HashMap<>(); traverseStarTree(builder.rootNode, dimValueToDocIdMap, true); @@ -2007,7 
+2280,7 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc } } assertEquals(1041, starTreeDocuments.size()); - builder.close(); + validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } private static StarTreeField getStarTreeField(int maxLeafDocs) { @@ -2151,7 +2424,7 @@ public void testMergeFlow() throws IOException { getAttributes(1000) ); - BaseStarTreeBuilder builder = getStarTreeBuilder(sf, writeState, mapperService); + builder = getStarTreeBuilder(sf, writeState, mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** [0, 0, 0, 0] | [0.0] @@ -2163,11 +2436,183 @@ public void testMergeFlow() throws IOException { ... [999, 999, 999, 999] | [19980.0] */ - while (starTreeDocumentIterator.hasNext()) { - StarTreeDocument starTreeDocument = starTreeDocumentIterator.next(); + for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]); } - builder.close(); + builder.build(starTreeDocumentIterator); + + // Validate the star tree structure + validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); + } + + private void validateStarTree(TreeNode root, int totalDimensions, int maxLeafDocuments, List starTreeDocuments) { + Queue queue = new LinkedList<>(); + queue.offer(new Object[] { root, false }); + while (!queue.isEmpty()) { + Object[] current = queue.poll(); + TreeNode node = (TreeNode) current[0]; + boolean currentIsStarNode = (boolean) current[1]; + + assertNotNull(node); + + // assert dimensions + if (node.dimensionId != TreeNode.ALL) { + assertTrue(node.dimensionId >= 0 && node.dimensionId < totalDimensions); + } + if (node.children != null && !node.children.isEmpty()) { + assertEquals(node.dimensionId + 1, node.childDimensionId); + assertTrue(node.childDimensionId < totalDimensions); + TreeNode starNode = null; + Object[] nonStarNodeCumulativeMetrics = getMetrics(starTreeDocuments); + for (Map.Entry entry : node.children.entrySet()) { + Long childDimensionValue = entry.getKey(); + TreeNode child = entry.getValue(); + Object[] currMetrics = getMetrics(starTreeDocuments); + if (!child.isStarNode) { + // Validate dimension values in documents + for (int i = child.startDocId; i < child.endDocId; i++) { + StarTreeDocument doc = starTreeDocuments.get(i); + int j = 0; + addMetrics(doc, currMetrics, j); + if (!child.isStarNode) { + Long dimension = doc.dimensions[child.dimensionId]; + assertEquals(childDimensionValue, dimension); + if (dimension != null) { + assertEquals(child.dimensionValue, (long) dimension); + } else { + // TODO : fix this ? 
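+ // a null dimension value in the document maps to TreeNode.ALL (-1) on the corresponding tree node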
+ assertEquals(child.dimensionValue, TreeNode.ALL); + } + } + } + Object[] aggregatedMetrics = starTreeDocuments.get(child.aggregatedDocId).metrics; + int j = 0; + for (Object metric : currMetrics) { + /* + * TODO : refactor this to handle any data type + */ + if (metric instanceof Double) { + nonStarNodeCumulativeMetrics[j] = (double) nonStarNodeCumulativeMetrics[j] + (double) metric; + assertEquals((Double) metric, (Double) aggregatedMetrics[j], 0); + } else if (metric instanceof Long) { + nonStarNodeCumulativeMetrics[j] = (long) nonStarNodeCumulativeMetrics[j] + (long) metric; + assertEquals((long) metric, (long) aggregatedMetrics[j]); + } else if (metric instanceof Float) { + nonStarNodeCumulativeMetrics[j] = (float) nonStarNodeCumulativeMetrics[j] + (float) metric; + assertEquals((float) metric, (float) aggregatedMetrics[j], 0); + } + j++; + } + queue.offer(new Object[] { child, false }); + } else { + starNode = child; + } + } + // Add star node to queue + if (starNode != null) { + Object[] starNodeMetrics = getMetrics(starTreeDocuments); + for (int i = starNode.startDocId; i < starNode.endDocId; i++) { + StarTreeDocument doc = starTreeDocuments.get(i); + int j = 0; + addMetrics(doc, starNodeMetrics, j); + } + int j = 0; + Object[] aggregatedMetrics = starTreeDocuments.get(starNode.aggregatedDocId).metrics; + for (Object nonStarNodeCumulativeMetric : nonStarNodeCumulativeMetrics) { + assertEquals(nonStarNodeCumulativeMetric, starNodeMetrics[j]); + assertEquals(starNodeMetrics[j], aggregatedMetrics[j]); + /* + * TODO : refactor this to handle any data type + */ + if (nonStarNodeCumulativeMetric instanceof Double) { + assertEquals((double) nonStarNodeCumulativeMetric, (double) starNodeMetrics[j], 0); + assertEquals((double) nonStarNodeCumulativeMetric, (double) aggregatedMetrics[j], 0); + } else if (nonStarNodeCumulativeMetric instanceof Long) { + assertEquals((long) nonStarNodeCumulativeMetric, (long) starNodeMetrics[j]); + assertEquals((long) nonStarNodeCumulativeMetric, (long) aggregatedMetrics[j]); + } else if (nonStarNodeCumulativeMetric instanceof Float) { + assertEquals((float) nonStarNodeCumulativeMetric, (float) starNodeMetrics[j], 0); + assertEquals((float) nonStarNodeCumulativeMetric, (float) aggregatedMetrics[j], 0); + } + + j++; + } + assertEquals(-1L, starNode.dimensionValue); + queue.offer(new Object[] { starNode, true }); + } + } else { + assertTrue(node.endDocId - node.startDocId <= maxLeafDocuments); + } + + if (currentIsStarNode) { + StarTreeDocument prevDoc = null; + int docCount = 0; + int docId = node.startDocId; + int dimensionId = node.dimensionId; + + while (docId < node.endDocId) { + StarTreeDocument currentDoc = starTreeDocuments.get(docId); + docCount++; + + // Verify that the dimension at 'dimensionId' is set to STAR_IN_DOC_VALUES_INDEX + assertNull(currentDoc.dimensions[dimensionId]); + + // Verify sorting of documents + if (prevDoc != null) { + assertTrue(compareDocuments(prevDoc, currentDoc, dimensionId + 1, totalDimensions) <= 0); + } + prevDoc = currentDoc; + docId++; + } + + // Verify that the number of generated star documents matches the range in the star node + assertEquals(node.endDocId - node.startDocId, docCount); + } + } + } + + /** + * TODO : refactor this to handle any data type + */ + private static void addMetrics(StarTreeDocument doc, Object[] currMetrics, int j) { + for (Object metric : doc.metrics) { + if (metric instanceof Double) { + currMetrics[j] = (double) currMetrics[j] + (double) metric; + } else if (metric instanceof Long) { + 
currMetrics[j] = (long) currMetrics[j] + (long) metric; + } else if (metric instanceof Float) { + currMetrics[j] = (float) currMetrics[j] + (float) metric; + } + j++; + } + } + + private static Object[] getMetrics(List starTreeDocuments) { + Object[] nonStarNodeCumulativeMetrics = new Object[starTreeDocuments.get(0).metrics.length]; + for (int i = 0; i < nonStarNodeCumulativeMetrics.length; i++) { + if (starTreeDocuments.get(0).metrics[i] instanceof Long) { + nonStarNodeCumulativeMetrics[i] = 0L; + } else if (starTreeDocuments.get(0).metrics[i] instanceof Double) { + nonStarNodeCumulativeMetrics[i] = 0.0; + } else if (starTreeDocuments.get(0).metrics[i] instanceof Float) { + nonStarNodeCumulativeMetrics[i] = 0.0f; + } + } + return nonStarNodeCumulativeMetrics; + } + + private int compareDocuments(StarTreeDocument doc1, StarTreeDocument doc2, int startDim, int endDim) { + for (int i = startDim; i < endDim; i++) { + Long val1 = doc1.dimensions[i]; + Long val2 = doc2.dimensions[i]; + + if (!Objects.equals(val1, val2)) { + if (val1 == null) return 1; + if (val2 == null) return -1; + return Long.compare(val1, val2); + } + } + return 0; } Map getAttributes(int numSegmentDocs) { diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilderTests.java new file mode 100644 index 0000000000000..92382b78f60c6 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilderTests.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.builder; + +import org.apache.lucene.index.SegmentWriteState; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.mapper.MapperService; + +import java.io.IOException; + +public class OffHeapStarTreeBuilderTests extends AbstractStarTreeBuilderTests { + @Override + public BaseStarTreeBuilder getStarTreeBuilder( + StarTreeField starTreeField, + SegmentWriteState segmentWriteState, + MapperService mapperService + ) throws IOException { + return new OffHeapStarTreeBuilder(starTreeField, segmentWriteState, mapperService); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java index 564ab110fa7a5..828bddfb8aa6e 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreesBuilderTests.java @@ -97,18 +97,10 @@ public void test_buildWithNoStarTreeFields() throws IOException { public void test_getStarTreeBuilder() throws IOException { when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService); - StarTreeBuilder starTreeBuilder = starTreesBuilder.getSingleTreeBuilder(starTreeField, segmentWriteState, mapperService); + StarTreeBuilder starTreeBuilder = starTreesBuilder.getStarTreeBuilder(starTreeField, segmentWriteState, mapperService); assertTrue(starTreeBuilder instanceof OnHeapStarTreeBuilder); } - public void test_getStarTreeBuilder_illegalArgument() { - when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(starTreeFieldType)); - StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration(1, new HashSet<>(), StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP); - StarTreeField starTreeField = new StarTreeField("star_tree", new ArrayList<>(), new ArrayList<>(), starTreeFieldConfiguration); - StarTreesBuilder starTreesBuilder = new StarTreesBuilder(segmentWriteState, mapperService); - assertThrows(IllegalArgumentException.class, () -> starTreesBuilder.getSingleTreeBuilder(starTreeField, segmentWriteState, mapperService)); - } - public void test_closeWithNoStarTreeFields() throws IOException { StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( 1, diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java index dfc83125b2806..f56f7d9906ae1 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java @@ -127,7 +127,5 @@ public void test_multipleCoordinatedDocumentReader() throws IOException { assertNotEquals(0, sequentialDocValuesIterator2.getDocId()); assertEquals(1, sequentialDocValuesIterator2.getDocId()); assertEquals(9L, (long) sequentialDocValuesIterator2.value(1)); - } - } diff --git 
a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtilTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtilTests.java new file mode 100644 index 0000000000000..7d1bd37246fae --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentBitSetUtilTests.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.function.Function; + +/** + * Unit tests for {@link StarTreeDocumentBitSetUtil} + */ +public class StarTreeDocumentBitSetUtilTests extends OpenSearchTestCase { + + public void testWriteAndReadNullBitSets() throws IOException { + for (int k = 0; k < 10; k++) { + int randomArraySize = randomIntBetween(2, 256); + Long[] dims = new Long[randomArraySize]; + for (int i = 0; i < randomArraySize; i++) { + dims[i] = randomLong(); + } + testNullBasedOnBitset(dims); + } + } + + void testNullBasedOnBitset(Long[] dims) throws IOException { + Long[] dims1 = Arrays.copyOf(dims, dims.length); + int randomNullIndex1 = randomIntBetween(0, dims.length - 1); + int randomNullIndex2 = randomIntBetween(0, dims.length - 1); + dims[randomNullIndex1] = null; + dims[randomNullIndex2] = null; + Path basePath = createTempDir("OffHeapTests"); + FSDirectory fsDirectory = FSDirectory.open(basePath); + String TEST_FILE = "test_file"; + IndexOutput indexOutput = fsDirectory.createOutput(TEST_FILE, IOContext.DEFAULT); + StarTreeDocumentBitSetUtil.writeBitSet(dims, indexOutput); + indexOutput.close(); + + // test null value on read + IndexInput in = fsDirectory.openInput(TEST_FILE, IOContext.DEFAULT); + RandomAccessInput randomAccessInput = in.randomAccessSlice(0, in.length()); + Function identityValueSupplier = i -> null; + StarTreeDocumentBitSetUtil.readBitSet(randomAccessInput, 0, dims1, identityValueSupplier); + assertNull(dims1[randomNullIndex1]); + assertNull(dims1[randomNullIndex2]); + in.close(); + + // test identity value on read + long randomLong = randomLong(); + identityValueSupplier = i -> randomLong; + in = fsDirectory.openInput(TEST_FILE, IOContext.DEFAULT); + + randomAccessInput = in.randomAccessSlice(0, in.length()); + StarTreeDocumentBitSetUtil.readBitSet(randomAccessInput, 0, dims1, identityValueSupplier); + assertEquals(randomLong, (long) dims1[randomNullIndex1]); + assertEquals(randomLong, (long) dims1[randomNullIndex2]); + in.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java new file mode 100644 index 0000000000000..b485ea1a4fe3e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java @@ -0,0 +1,201 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.opensearch.common.Randomness; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Random; + +/** + * Tests for {@link StarTreeDocumentsSorter}. + */ +public class StarTreeDocumentsSorterTests extends OpenSearchTestCase { + private Map testData; + + @Before + public void setUp() throws Exception { + super.setUp(); + testData = new HashMap<>(); + testData.put(0, new Long[] { -1L, 2L, 3L }); + testData.put(1, new Long[] { 1L, 2L, 2L }); + testData.put(2, new Long[] { -1L, -1L, 3L }); + testData.put(3, new Long[] { 1L, 2L, null }); + testData.put(4, new Long[] { 1L, null, 3L }); + } + + public void testSortDocumentsOffHeap_FirstDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4 }; + int dimensionId = -1; + int numDocs = 5; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + assertArrayEquals(new int[] { 2, 0, 1, 3, 4 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_ThirdDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4 }; + int dimensionId = 1; + int numDocs = 5; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + assertArrayEquals(new int[] { 1, 0, 2, 4, 3 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_SingleElement() { + int[] sortedDocIds = { 0 }; + int dimensionId = -1; + int numDocs = 1; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + assertArrayEquals(new int[] { 0 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_EmptyArray() { + int[] sortedDocIds = {}; + int dimensionId = -1; + int numDocs = 0; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + assertArrayEquals(new int[] {}, sortedDocIds); + } + + public void testSortDocumentsOffHeap_SecondDimensionId() { + int[] sortedDocIds = { 0, 1, 2, 3, 4 }; + int dimensionId = 0; + int numDocs = 5; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + assertArrayEquals(new int[] { 2, 1, 0, 3, 4 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_AllNulls() { + Map testData = new HashMap<>(); + testData.put(0, new Long[] { null, null, null }); + testData.put(1, new Long[] { null, null, null }); + testData.put(2, new Long[] { null, null, null }); + + int[] sortedDocIds = { 0, 1, 2 }; + int dimensionId = -1; + int numDocs = 3; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + // The order should remain unchanged as all elements are equal (null) + assertArrayEquals(new int[] { 0, 1, 2 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_Negatives() { + Map testData = new HashMap<>(); + testData.put(0, new Long[] { -10L, 0L }); + testData.put(1, new Long[] { -9L, 0L }); + testData.put(2, new Long[] { -8L, 0L }); + testData.put(3, new Long[] { -7L, -0L }); + testData.put(4, new Long[] { -15L, -0L }); + + int[] sortedDocIds = { 0, 1, 2, 3, 4 }; + int 
dimensionId = -1; + int numDocs = 5; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + // Documents are sorted by the ascending value of the first dimension, so the most negative value (-15L) sorts first + assertArrayEquals(new int[] { 4, 0, 1, 2, 3 }, sortedDocIds); + } + + public void testRandomSort() { + int i = 0; + while (i < 10) { + testRandomizedSort(); + i++; + } + } + + private void testRandomizedSort() { + + int numDocs = randomIntBetween(0, 1000); + Random random = Randomness.get(); + // skew towards a realistic number of dimensions + int numDimensions = random.nextBoolean() ? randomIntBetween(2, 10) : randomIntBetween(2, 100); + List testData = new ArrayList<>(); + // Generate random test data + for (int i = 0; i < numDocs; i++) { + Long[] dimensions = new Long[numDimensions]; + for (int j = 0; j < numDimensions; j++) { + if (random.nextFloat() < 0.5) { + dimensions[j] = random.nextBoolean() ? Long.valueOf(0L) : random.nextBoolean() ? -1L : null; + } else { + dimensions[j] = random.nextLong(); + } + } + testData.add(dimensions); + } + + int[] sortedDocIds = new int[numDocs]; + for (int i = 0; i < numDocs; i++) { + sortedDocIds[i] = i; + } + // sort dimensions from index dimensionId + 1 through numDimensions - 1 + // for example, to sort starting from the dimension at index 0, pass -1 as dimensionId + int dimensionId = random.nextInt(numDimensions) - 1; + + // Sort using StarTreeDocumentsSorter + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + + // Verify the sorting + for (int i = 1; i < numDocs; i++) { + Long[] prev = testData.get(sortedDocIds[i - 1]); + Long[] curr = testData.get(sortedDocIds[i]); + boolean isCorrectOrder = true; + for (int j = dimensionId + 1; j < numDimensions; j++) { + int comparison = compareLongs(prev[j], curr[j]); + if (comparison < 0) { + break; + } else if (comparison > 0) { + isCorrectOrder = false; + break; + } + } + assertTrue( + "Sorting error when sorting from dimension index " + + dimensionId + + " Prev : " + + Arrays.toString(prev) + + " :: Curr : " + + Arrays.toString(curr), + isCorrectOrder + ); + } + } + + private int compareLongs(Long a, Long b) { + if (!Objects.equals(a, b)) { + if (a == null) { + return 1; + } else if (b == null) { + return -1; + } else { + return a.compareTo(b); + } + } + return 0; + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 3144b1b007924..132d2ff5a566a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -69,7 +69,7 @@ public void testValidStarTree() throws IOException { List expectedMetrics = Arrays.asList(MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); - assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); assertEquals( new HashSet<>(Arrays.asList("@timestamp", "status")), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() ) @@ -101,7 +101,7 @@ public void testValidStarTreeDefaults() throws IOException { ); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); - assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); } } From 97c1bf01ff511c4db74dc8a81045447b009bec29 Mon Sep 17 00:00:00 2001 From: Kiran Prakash Date: Wed, 7 Aug 2024 09:39:07 -0700 Subject: [PATCH 65/68] QueryGroup Resource Tracking framework and implementation (#13897) * initial code for the sandbox resource tracking and cancellation framework Signed-off-by: Kiran Prakash * Fix Failing Tests Signed-off-by: Kiran Prakash * spotless Apply Signed-off-by: Kiran Prakash * Update SandboxService.java Signed-off-by: Kiran Prakash * Update SandboxService.java Signed-off-by: Kiran Prakash * Update SandboxTask.java Signed-off-by: Kiran Prakash * Add java docs Signed-off-by: Kiran Prakash * spotless Signed-off-by: Kiran Prakash * javadocs Signed-off-by: Kiran Prakash * javadocs Signed-off-by: Kiran Prakash * java docs Signed-off-by: Kiran Prakash * Update AbstractTaskCancellation.java Signed-off-by: Kiran Prakash * Update SandboxModule.java Signed-off-by: Kiran Prakash * Some tests and stubs Signed-off-by: Kiran Prakash * spotless Signed-off-by: Kiran Prakash * :server:testingConventions Signed-off-by: Kiran Prakash * Update AbstractTaskCancellation.java Signed-off-by: Kiran Prakash * more tests Signed-off-by: Kiran Prakash * addressing comments Signed-off-by: Kiran Prakash * revert some accidentally pushed files Signed-off-by: Kiran Prakash * resolve flakiness Signed-off-by: Kiran Prakash * renaming sandbox to querygroup and adjusting code based on merged PRs Signed-off-by: Kiran Prakash * jvm to memory Signed-off-by: Kiran Prakash * missing java docs Signed-off-by: Kiran Prakash * spotless Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * pluck cancellation changes out of this PR Signed-off-by: Kiran Prakash * remove unused Signed-off-by: Kiran Prakash * remove cancellation related code and add more tests coverage Signed-off-by: Kiran Prakash * us only memory and not jvm Signed-off-by: Kiran Prakash * test conventions Signed-off-by: Kiran Prakash * Bring back enum Signed-off-by: Kiran Prakash * Update SearchBackpressureService.java Signed-off-by: Kiran Prakash * revert changes Signed-off-by: Kiran Prakash * revert changes Signed-off-by: Kiran Prakash * all required changes Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * cleanups Signed-off-by: Kiran Prakash * Delete QueryGroupService.java Signed-off-by: Kiran Prakash * cleanups Signed-off-by: Kiran Prakash * Update QueryGroupLevelResourceUsageViewTests.java Signed-off-by: Kiran Prakash * Update QueryGroupLevelResourceUsageViewTests.java Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerService.java Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerService.java Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerService.java Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * rebasing with latest main Signed-off-by: Kiran Prakash * remove experimental Signed-off-by: Kiran Prakash * remove queryGroupId Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerService.java Signed-off-by: Kiran Prakash * change code 
comments Signed-off-by: Kiran Prakash * remmove QueryGroupUsageTracker Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerService.java Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerService.java Signed-off-by: Kiran Prakash * remove QueryGroupTestHelpers Signed-off-by: Kiran Prakash * cleanups Signed-off-by: Kiran Prakash * remove queryGroupHelper Signed-off-by: Kiran Prakash * Update ResourceTypeTests.java Signed-off-by: Kiran Prakash * extend OpenSearchTestCase Signed-off-by: Kiran Prakash * pr comments Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * Update QueryGroupResourceUsageTrackerServiceTests.java Signed-off-by: Kiran Prakash * Update ResourceTypeTests.java Signed-off-by: Kiran Prakash * Update ResourceTypeTests.java Signed-off-by: Kiran Prakash * Update ResourceType.java Signed-off-by: Kiran Prakash * Update ResourceType.java Signed-off-by: Kiran Prakash --------- Signed-off-by: Kiran Prakash --- CHANGELOG.md | 1 + .../opensearch/cluster/metadata/Metadata.java | 2 +- .../org/opensearch/search/ResourceType.java | 21 ++- .../wlm/QueryGroupLevelResourceUsageView.java | 50 +++++++ ...QueryGroupResourceUsageTrackerService.java | 84 ++++++++++++ .../opensearch/wlm/tracker/package-info.java | 12 ++ .../opensearch/search/ResourceTypeTests.java | 52 ++++++++ ...QueryGroupLevelResourceUsageViewTests.java | 64 +++++++++ ...GroupResourceUsageTrackerServiceTests.java | 126 ++++++++++++++++++ 9 files changed, 408 insertions(+), 4 deletions(-) create mode 100644 server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java create mode 100644 server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java create mode 100644 server/src/main/java/org/opensearch/wlm/tracker/package-info.java create mode 100644 server/src/test/java/org/opensearch/search/ResourceTypeTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java create mode 100644 server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index be5e5598b09c2..3e83a5bf9b4cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) - [Concurrent Segment Search] Support composite aggregations with scripting ([#15072](https://github.com/opensearch-project/OpenSearch/pull/15072)) - Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) +- [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 440b9e267cf0a..09bef2ddf9ee6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -1391,7 +1391,7 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } - private Map getQueryGroups() { + public 
Map getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) .map(QueryGroupMetadata::queryGroups) diff --git a/server/src/main/java/org/opensearch/search/ResourceType.java b/server/src/main/java/org/opensearch/search/ResourceType.java index fe5ce4dd2bb50..0cba2222a6e20 100644 --- a/server/src/main/java/org/opensearch/search/ResourceType.java +++ b/server/src/main/java/org/opensearch/search/ResourceType.java @@ -10,21 +10,26 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.tasks.Task; import java.io.IOException; +import java.util.function.Function; /** * Enum to hold the resource type */ @PublicApi(since = "2.x") public enum ResourceType { - CPU("cpu"), - MEMORY("memory"); + CPU("cpu", task -> task.getTotalResourceUtilization(ResourceStats.CPU)), + MEMORY("memory", task -> task.getTotalResourceUtilization(ResourceStats.MEMORY)); private final String name; + private final Function getResourceUsage; - ResourceType(String name) { + ResourceType(String name, Function getResourceUsage) { this.name = name; + this.getResourceUsage = getResourceUsage; } /** @@ -48,4 +53,14 @@ public static void writeTo(StreamOutput out, ResourceType resourceType) throws I public String getName() { return name; } + + /** + * Gets the resource usage for a given resource type and task. + * + * @param task the task for which to calculate resource usage + * @return the resource usage + */ + public long getResourceUsage(Task task) { + return getResourceUsage.apply(task); + } } diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java b/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java new file mode 100644 index 0000000000000..2fd743dc3f83f --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.search.ResourceType; +import org.opensearch.tasks.Task; + +import java.util.List; +import java.util.Map; + +/** + * Represents the point in time view of resource usage of a QueryGroup and + * has a 1:1 relation with a QueryGroup. + * This class holds the resource usage data and the list of active tasks. + */ +public class QueryGroupLevelResourceUsageView { + // resourceUsage holds the resource usage data for a QueryGroup at a point in time + private final Map resourceUsage; + // activeTasks holds the list of active tasks for a QueryGroup at a point in time + private final List activeTasks; + + public QueryGroupLevelResourceUsageView(Map resourceUsage, List activeTasks) { + this.resourceUsage = resourceUsage; + this.activeTasks = activeTasks; + } + + /** + * Returns the resource usage data. + * + * @return The map of resource usage data + */ + public Map getResourceUsageData() { + return resourceUsage; + } + + /** + * Returns the list of active tasks. 
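+ * The returned list is the point-in-time snapshot captured at construction and is not refreshed afterwards.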
+ * + * @return The list of active tasks + */ + public List getActiveTasks() { + return activeTasks; + } +} diff --git a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java new file mode 100644 index 0000000000000..bfbf5d8a452d1 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.search.ResourceType; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.wlm.QueryGroupLevelResourceUsageView; +import org.opensearch.wlm.QueryGroupTask; + +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * This class tracks resource usage per QueryGroup. + */ +public class QueryGroupResourceUsageTrackerService { + + public static final EnumSet TRACKED_RESOURCES = EnumSet.allOf(ResourceType.class); + private final TaskResourceTrackingService taskResourceTrackingService; + + /** + * QueryGroupResourceUsageTrackerService constructor + * + * @param taskResourceTrackingService Service that helps track resource usage of tasks running on a node. + */ + public QueryGroupResourceUsageTrackerService(TaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService = taskResourceTrackingService; + } + + /** + * Constructs a map of QueryGroupLevelResourceUsageView instances for each QueryGroup. + * + * @return Map of QueryGroup views + */ + public Map constructQueryGroupLevelUsageViews() { + final Map> tasksByQueryGroup = getTasksGroupedByQueryGroup(); + final Map queryGroupViews = new HashMap<>(); + + // Iterate over each QueryGroup entry + for (Map.Entry> queryGroupEntry : tasksByQueryGroup.entrySet()) { + // Compute the QueryGroup usage + final EnumMap queryGroupUsage = new EnumMap<>(ResourceType.class); + for (ResourceType resourceType : TRACKED_RESOURCES) { + long queryGroupResourceUsage = 0; + for (Task task : queryGroupEntry.getValue()) { + queryGroupResourceUsage += resourceType.getResourceUsage(task); + } + queryGroupUsage.put(resourceType, queryGroupResourceUsage); + } + + // Add to the QueryGroup View + queryGroupViews.put( + queryGroupEntry.getKey(), + new QueryGroupLevelResourceUsageView(queryGroupUsage, queryGroupEntry.getValue()) + ); + } + return queryGroupViews; + } + + /** + * Groups tasks by their associated QueryGroup.
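+ * Only tasks that are instances of QueryGroupTask are grouped; all other resource-aware tasks are filtered out.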
+ * + * @return Map of tasks grouped by QueryGroup + */ + private Map> getTasksGroupedByQueryGroup() { + return taskResourceTrackingService.getResourceAwareTasks() + .values() + .stream() + .filter(QueryGroupTask.class::isInstance) + .map(QueryGroupTask.class::cast) + .collect(Collectors.groupingBy(QueryGroupTask::getQueryGroupId, Collectors.mapping(task -> (Task) task, Collectors.toList()))); + } +} diff --git a/server/src/main/java/org/opensearch/wlm/tracker/package-info.java b/server/src/main/java/org/opensearch/wlm/tracker/package-info.java new file mode 100644 index 0000000000000..86efc99355d3d --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/tracker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * QueryGroup resource tracking artifacts + */ +package org.opensearch.wlm.tracker; diff --git a/server/src/test/java/org/opensearch/search/ResourceTypeTests.java b/server/src/test/java/org/opensearch/search/ResourceTypeTests.java new file mode 100644 index 0000000000000..78827b8b1bdad --- /dev/null +++ b/server/src/test/java/org/opensearch/search/ResourceTypeTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.test.OpenSearchTestCase; + +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ResourceTypeTests extends OpenSearchTestCase { + + public void testFromName() { + assertSame(ResourceType.CPU, ResourceType.fromName("cpu")); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("CPU"); }); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("Cpu"); }); + + assertSame(ResourceType.MEMORY, ResourceType.fromName("memory")); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("Memory"); }); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("MEMORY"); }); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("JVM"); }); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("Heap"); }); + assertThrows(IllegalArgumentException.class, () -> { ResourceType.fromName("Disk"); }); + } + + public void testGetName() { + assertEquals("cpu", ResourceType.CPU.getName()); + assertEquals("memory", ResourceType.MEMORY.getName()); + } + + public void testGetResourceUsage() { + SearchShardTask mockTask = createMockTask(SearchShardTask.class, 100, 200); + assertEquals(100, ResourceType.CPU.getResourceUsage(mockTask)); + assertEquals(200, ResourceType.MEMORY.getResourceUsage(mockTask)); + } + + private T createMockTask(Class type, long cpuUsage, long heapUsage) { + T task = mock(type); + when(task.getTotalResourceUtilization(ResourceStats.CPU)).thenReturn(cpuUsage); + when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); + return task; + } +} diff --git 
a/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java new file mode 100644 index 0000000000000..7f6419505fec2 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.action.search.SearchAction; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.search.ResourceType; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class QueryGroupLevelResourceUsageViewTests extends OpenSearchTestCase { + Map resourceUsage; + List activeTasks; + + public void setUp() throws Exception { + super.setUp(); + resourceUsage = Map.of(ResourceType.fromName("memory"), 34L, ResourceType.fromName("cpu"), 12L); + activeTasks = List.of(getRandomTask(4321)); + } + + public void testGetResourceUsageData() { + QueryGroupLevelResourceUsageView queryGroupLevelResourceUsageView = new QueryGroupLevelResourceUsageView( + resourceUsage, + activeTasks + ); + Map resourceUsageData = queryGroupLevelResourceUsageView.getResourceUsageData(); + assertTrue(assertResourceUsageData(resourceUsageData)); + } + + public void testGetActiveTasks() { + QueryGroupLevelResourceUsageView queryGroupLevelResourceUsageView = new QueryGroupLevelResourceUsageView( + resourceUsage, + activeTasks + ); + List activeTasks = queryGroupLevelResourceUsageView.getActiveTasks(); + assertEquals(1, activeTasks.size()); + assertEquals(4321, activeTasks.get(0).getId()); + } + + private boolean assertResourceUsageData(Map resourceUsageData) { + return resourceUsageData.get(ResourceType.fromName("memory")) == 34L && resourceUsageData.get(ResourceType.fromName("cpu")) == 12L; + } + + private Task getRandomTask(long id) { + return new Task( + id, + "transport", + SearchAction.NAME, + "test description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java b/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java new file mode 100644 index 0000000000000..967119583c25f --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.search.SearchTask; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.search.ResourceType; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.wlm.QueryGroupLevelResourceUsageView; +import org.opensearch.wlm.QueryGroupTask; +import org.junit.After; +import org.junit.Before; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class QueryGroupResourceUsageTrackerServiceTests extends OpenSearchTestCase { + TestThreadPool threadPool; + TaskResourceTrackingService mockTaskResourceTrackingService; + QueryGroupResourceUsageTrackerService queryGroupResourceUsageTrackerService; + + @Before + public void setup() { + threadPool = new TestThreadPool(getTestName()); + mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class); + queryGroupResourceUsageTrackerService = new QueryGroupResourceUsageTrackerService(mockTaskResourceTrackingService); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + public void testConstructQueryGroupLevelViews_CreatesQueryGroupLevelUsageView_WhenTasksArePresent() { + List queryGroupIds = List.of("queryGroup1", "queryGroup2", "queryGroup3"); + + Map activeSearchShardTasks = createActiveSearchShardTasks(queryGroupIds); + when(mockTaskResourceTrackingService.getResourceAwareTasks()).thenReturn(activeSearchShardTasks); + Map stringQueryGroupLevelResourceUsageViewMap = queryGroupResourceUsageTrackerService + .constructQueryGroupLevelUsageViews(); + + for (String queryGroupId : queryGroupIds) { + assertEquals( + 400, + (long) stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getResourceUsageData().get(ResourceType.MEMORY) + ); + assertEquals(2, stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getActiveTasks().size()); + } + } + + public void testConstructQueryGroupLevelViews_CreatesQueryGroupLevelUsageView_WhenTasksAreNotPresent() { + Map stringQueryGroupLevelResourceUsageViewMap = queryGroupResourceUsageTrackerService + .constructQueryGroupLevelUsageViews(); + assertTrue(stringQueryGroupLevelResourceUsageViewMap.isEmpty()); + } + + public void testConstructQueryGroupLevelUsageViews_WithTasksHavingDifferentResourceUsage() { + Map activeSearchShardTasks = new HashMap<>(); + activeSearchShardTasks.put(1L, createMockTask(SearchShardTask.class, 100, 200, "queryGroup1")); + activeSearchShardTasks.put(2L, createMockTask(SearchShardTask.class, 200, 400, "queryGroup1")); + when(mockTaskResourceTrackingService.getResourceAwareTasks()).thenReturn(activeSearchShardTasks); + + Map queryGroupViews = queryGroupResourceUsageTrackerService + .constructQueryGroupLevelUsageViews(); + + assertEquals(600, (long) 
queryGroupViews.get("queryGroup1").getResourceUsageData().get(ResourceType.MEMORY)); + assertEquals(2, queryGroupViews.get("queryGroup1").getActiveTasks().size()); + } + + private Map createActiveSearchShardTasks(List queryGroupIds) { + Map activeSearchShardTasks = new HashMap<>(); + long task_id = 0; + for (String queryGroupId : queryGroupIds) { + for (int i = 0; i < 2; i++) { + activeSearchShardTasks.put(++task_id, createMockTask(SearchShardTask.class, 100, 200, queryGroupId)); + } + } + return activeSearchShardTasks; + } + + private T createMockTask(Class type, long cpuUsage, long heapUsage, String queryGroupId) { + T task = mock(type); + if (task instanceof SearchTask || task instanceof SearchShardTask) { + // Stash the current thread context to ensure that any existing context is preserved and restored after setting the query group + // ID. + try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, queryGroupId); + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + } + when(task.getTotalResourceUtilization(ResourceStats.CPU)).thenReturn(cpuUsage); + when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); + when(task.getStartTimeNanos()).thenReturn((long) 0); + + AtomicBoolean isCancelled = new AtomicBoolean(false); + doAnswer(invocation -> { + isCancelled.set(true); + return null; + }).when(task).cancel(anyString()); + doAnswer(invocation -> isCancelled.get()).when(task).isCancelled(); + + return task; + } +} From 348c04e7a32e13ea040a1d2e0459c03da9ec0c2c Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Wed, 7 Aug 2024 11:40:52 -0700 Subject: [PATCH 66/68] Fix CHANGELOG for #15054 (#15150) Signed-off-by: Jay Deng --- CHANGELOG-3.0.md | 1 - CHANGELOG.md | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 78e93eed0158a..48d978bede420 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -13,7 +13,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054)) ### Dependencies diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e83a5bf9b4cb..f44949bf38511 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add ThreadContextPermission for stashAndMergeHeaders and stashWithOrigin ([#15039](https://github.com/opensearch-project/OpenSearch/pull/15039)) - [Concurrent Segment Search] Support composite aggregations with scripting ([#15072](https://github.com/opensearch-project/OpenSearch/pull/15072)) - Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) +- Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054)) - [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) ### Dependencies From 7f72a6e6580e42740a938c35dc00a6f88e6089df Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 7 Aug 2024 15:48:31 -0500 Subject: [PATCH 67/68] Add 'ShardManagement:*' labels to Cluster Manager triage search (#14234) Signed-off-by: Andrew Ross --- TRIAGING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TRIAGING.md b/TRIAGING.md index 53ef77de49159..dddcbc15394ab 100644 --- a/TRIAGING.md +++ b/TRIAGING.md @@ -31,8 +31,8 @@ Meeting structure may vary slightly, but the general structure is as follows: - [Search](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22) - [Indexing](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22%2C) - [Storage](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3AStorage%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22) - - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22) - - 
[Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) + - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22) + - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22ShardManagement%3AResiliency%22%2C%22ShardManagement%3AInsights%22%2C%22ShardManagement%3ASizing%22%2C%22ShardManagement%3APerformance%22%2C%22ShardManagement%3APlacement%22%2C%22ShardManagement%3ARouting%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22) 5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. 6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. 7. **Review of Old Untriaged Issues:** Look at all [untriaged issues older than 14 days](https://peternied.github.io/redirect/issue_search.html?owner=opensearch-project&repo=OpenSearch&tag=untriaged&created-since-days=14) to prevent issues from falling through the cracks. 
From d99f55f1cfaa9a0b52aa3b19d87fb6c45ab7a288 Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Thu, 11 Jul 2024 12:57:20 -0700 Subject: [PATCH 68/68] HotToWarmTieringService changes to tier shards Signed-off-by: Neetika Singhal --- CHANGELOG.md | 1 + .../tiering/HotToWarmTieringServiceIT.java | 114 ++++++ .../tiering/TieringBaseIntegTestCase.java | 102 +++++ .../tiering/TieringRequestContext.java | 104 ++++++ .../TieringUpdateClusterStateRequest.java | 40 ++ .../admin/indices/tiering/TieringUtils.java | 39 ++ .../tiering/TieringValidationResult.java | 10 - .../TransportHotToWarmTieringAction.java | 30 +- .../cluster/metadata/IndexMetadata.java | 13 + .../metadata/IndexNameExpressionResolver.java | 31 ++ .../tiering/HotToWarmTieringService.java | 348 ++++++++++++++++++ .../tiering/TieringRequestValidator.java | 22 +- .../main/java/org/opensearch/node/Node.java | 11 + .../TransportHotToWarmTieringActionTests.java | 2 +- .../IndexNameExpressionResolverTests.java | 40 ++ .../tiering/HotToWarmTieringServiceTests.java | 74 ++++ .../tiering/TieringRequestValidatorTests.java | 22 -- 17 files changed, 945 insertions(+), 58 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/tiering/HotToWarmTieringServiceIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/tiering/TieringBaseIntegTestCase.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringRequestContext.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUpdateClusterStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java create mode 100644 server/src/main/java/org/opensearch/indices/tiering/HotToWarmTieringService.java create mode 100644 server/src/test/java/org/opensearch/indices/tiering/HotToWarmTieringServiceTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index f44949bf38511..bd456fb30447e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) - Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054)) - [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) +- HotToWarmTieringService changes to tier shards ([#14891](https://github.com/opensearch-project/OpenSearch/pull/14891)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/server/src/internalClusterTest/java/org/opensearch/tiering/HotToWarmTieringServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/tiering/HotToWarmTieringServiceIT.java new file mode 100644 index 0000000000000..4ad8d87803bae --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/tiering/HotToWarmTieringServiceIT.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tiering; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.tiering.HotToWarmTieringAction; +import org.opensearch.action.admin.indices.tiering.HotToWarmTieringResponse; +import org.opensearch.action.admin.indices.tiering.TieringIndexRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.MockInternalClusterInfoService; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.index.IndexModule; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.Map; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +@ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false) +// Uncomment the below line to enable trace level logs for this test for better debugging +// @TestLogging(reason = "Getting trace logs from tiering package", value = +// "org.opensearch.tiering:TRACE,org.opensearch.cluster.routing.allocation.decider:TRACE") +public class HotToWarmTieringServiceIT extends TieringBaseIntegTestCase { + + protected static final String TEST_IDX_1 = "test-idx-1"; + protected static final String TEST_IDX_2 = "test-idx-2"; + protected static final int NUM_DOCS_IN_BULK = 10; + private static final long TOTAL_SPACE_BYTES = new ByteSizeValue(1000, ByteSizeUnit.KB).getBytes(); + + @Before + public void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + // waiting for the recovery pr to be merged in + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13647") + public void testTieringBasic() { + final int numReplicasIndex = 0; + internalCluster().ensureAtLeastNumDataNodes(1); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasIndex) + .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.FULL.name()) + .build(); + + String[] indices = new String[] { TEST_IDX_1, TEST_IDX_2 }; + for (String index : indices) { + assertAcked(client().admin().indices().prepareCreate(index).setSettings(settings).get()); + ensureGreen(index); + // Ingesting some docs + indexBulk(index, NUM_DOCS_IN_BULK); + flushAndRefresh(index); + ensureGreen(); + SearchResponse searchResponse = client().prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get(); + // Asserting that search returns same number of docs as ingested + assertHitCount(searchResponse, NUM_DOCS_IN_BULK); + } + + // Spin up node having search role + internalCluster().ensureAtLeastNumSearchAndDataNodes(1); + + final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); + clusterInfoService.setDiskUsageFunctionAndRefresh( + (discoveryNode, fsInfoPath) -> 
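/* simulate fully free disks on each node (total == free) so that low-disk checks should not interfere with tiering in this test */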
setDiskUsage(fsInfoPath, TOTAL_SPACE_BYTES, TOTAL_SPACE_BYTES) + ); + + TieringIndexRequest request = new TieringIndexRequest(TARGET_WARM_TIER, indices); + request.waitForCompletion(true); + HotToWarmTieringResponse response = client().admin().indices().execute(HotToWarmTieringAction.INSTANCE, request).actionGet(); + assertAcked(response); + assertTrue(response.getFailedIndices().isEmpty()); + assertTrue(response.isAcknowledged()); + ensureGreen(); + for (String index : indices) { + SearchResponse searchResponse = client().prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get(); + // Asserting that search returns same number of docs as ingested + assertHitCount(searchResponse, NUM_DOCS_IN_BULK); + GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices(index).get(); + assertWarmSettings(getIndexResponse, index); + assertAcked(client().admin().indices().prepareDelete(index).get()); + } + } + + private void assertWarmSettings(GetIndexResponse response, String indexName) { + final Map settings = response.settings(); + assertThat(settings, notNullValue()); + assertThat(settings.size(), equalTo(1)); + Settings indexSettings = settings.get(indexName); + assertThat(indexSettings, notNullValue()); + assertThat( + indexSettings.get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey()), + equalTo(IndexModule.DataLocalityType.PARTIAL.name()) + ); + assertThat(indexSettings.get(IndexModule.INDEX_TIERING_STATE.getKey()), equalTo(IndexModule.TieringState.WARM.name())); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/tiering/TieringBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/tiering/TieringBaseIntegTestCase.java new file mode 100644 index 0000000000000..0b60e71480315 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/tiering/TieringBaseIntegTestCase.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tiering; + +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.MockInternalClusterInfoService; +import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; + +public class TieringBaseIntegTestCase extends OpenSearchIntegTestCase { + + protected Path segmentRepoPath; + protected Path translogRepoPath; + Settings extraSettings = Settings.EMPTY; + private final List documentKeys = List.of( + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5) + ); + + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; + protected static final String TARGET_WARM_TIER = "warm"; + + /** + * Disable MockFSIndexStore plugin as it wraps the FSDirectory over a OpenSearchMockDirectoryWrapper which extends FilterDirectory (whereas FSDirectory extends BaseDirectory) + * As a result of this wrapping the local directory of Composite Directory does not satisfy the assertion that local directory must be of type FSDirectory + * + */ + @Override + protected boolean addMockIndexStorePlugin() { + return false; + } + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class); + } + + @Override + protected Settings featureFlagSettings() { + Settings.Builder featureSettings = Settings.builder(); + featureSettings.put(FeatureFlags.TIERED_REMOTE_INDEX, true); + return featureSettings.build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(extraSettings) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + } + + protected BulkResponse indexBulk(String indexName, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + final IndexRequest request = client().prepareIndex(indexName) + .setId(UUIDs.randomBase64UUID()) + .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)) + .request(); + bulkRequest.add(request); + } + return client().bulk(bulkRequest).actionGet(); + } + + protected MockInternalClusterInfoService getMockInternalClusterInfoService() { + return (MockInternalClusterInfoService) internalCluster().getCurrentClusterManagerNodeInstance(ClusterInfoService.class); + } + + protected static FsInfo.Path setDiskUsage(FsInfo.Path original, long totalBytes, long freeBytes) { + return new FsInfo.Path(original.getPath(), original.getMount(), totalBytes, freeBytes, 
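/* the final argument fills the available-bytes slot, mirrored from freeBytes for this simulated usage */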
freeBytes); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringRequestContext.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringRequestContext.java new file mode 100644 index 0000000000000..239d1711e4fe0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringRequestContext.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Context class to hold indices to be tiered per request. It also holds + * the listener per request to mark the request as complete once all + * tiering operations are completed. + * + * @opensearch.experimental + */ + +@ExperimentalApi +public class TieringRequestContext { + private final ActionListener actionListener; + private final Set inProgressIndices; + private final Set tieredIndices; + private final Set completedIndices; + private final Map failedIndices; + + public TieringRequestContext( + ActionListener actionListener, + Set acceptedIndices, + Map failedIndices + ) { + this.actionListener = actionListener; + // by default all the accepted indices are added to the in-progress set + this.inProgressIndices = ConcurrentHashMap.newKeySet(); + inProgressIndices.addAll(acceptedIndices); + this.failedIndices = failedIndices; + this.tieredIndices = new HashSet<>(); + this.completedIndices = new HashSet<>(); + } + + public ActionListener getListener() { + return actionListener; + } + + public Map getFailedIndices() { + return failedIndices; + } + + public Set getInProgressIndices() { + return inProgressIndices; + } + + public Set getCompletedIndices() { + return completedIndices; + } + + public Set getTieredIndices() { + return tieredIndices; + } + + public boolean isRequestProcessingComplete() { + return inProgressIndices.isEmpty() && tieredIndices.isEmpty(); + } + + public void addToFailed(Index index, String reason) { + inProgressIndices.remove(index); + failedIndices.put(index, reason); + } + + public void addToTiered(Index index) { + inProgressIndices.remove(index); + tieredIndices.add(index); + } + + public void addToCompleted(Index index) { + tieredIndices.remove(index); + completedIndices.add(index); + } + + @Override + public String toString() { + return "TieringRequestContext{" + + "actionListener=" + + actionListener + + ", inProgressIndices=" + + inProgressIndices + + ", tieredIndices=" + + tieredIndices + + ", completedIndices=" + + completedIndices + + ", failedIndices=" + + failedIndices + + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUpdateClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUpdateClusterStateRequest.java new file mode 100644 index 0000000000000..c22a760679495 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUpdateClusterStateRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.cluster.ack.IndicesClusterStateUpdateRequest; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.index.Index; + +import java.util.Map; + +/** + * Cluster state update request that allows tiering for indices + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TieringUpdateClusterStateRequest extends IndicesClusterStateUpdateRequest { + + private final Map rejectedIndices; + private final boolean waitForCompletion; + + public TieringUpdateClusterStateRequest(Map rejectedIndices, boolean waitForCompletion) { + this.rejectedIndices = rejectedIndices; + this.waitForCompletion = waitForCompletion; + } + + public boolean waitForCompletion() { + return waitForCompletion; + } + + public Map getRejectedIndices() { + return rejectedIndices; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java new file mode 100644 index 0000000000000..3c4e1d5b2f7d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringUtils.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.tiering; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.index.Index; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +/** + * Utility class for tiering operations + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TieringUtils { + + /** + * Constructs a HotToWarmTieringResponse from the rejected indices map + * + * @param rejectedIndices the rejected indices map + * @return the HotToWarmTieringResponse object + */ + public static HotToWarmTieringResponse constructToHotToWarmTieringResponse(final Map rejectedIndices) { + final List indicesResult = new LinkedList<>(); + for (Map.Entry rejectedIndex : rejectedIndices.entrySet()) { + indicesResult.add(new HotToWarmTieringResponse.IndexResult(rejectedIndex.getKey().getName(), rejectedIndex.getValue())); + } + return new HotToWarmTieringResponse(true, indicesResult); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java index ccd60daf027ce..f656d7dd28357 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringValidationResult.java @@ -12,8 +12,6 @@ import org.opensearch.core.index.Index; import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -50,14 +48,6 @@ public void addToRejected(Index index, String reason) { rejectedIndices.put(index, reason); } - public HotToWarmTieringResponse constructResponse() { - final List indicesResult = new LinkedList<>(); - for (Map.Entry rejectedIndex : rejectedIndices.entrySet()) { - indicesResult.add(new HotToWarmTieringResponse.IndexResult(rejectedIndex.getKey().getName(), 
rejectedIndex.getValue())); - } - return new HotToWarmTieringResponse(acceptedIndices.size() > 0, indicesResult); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java index 8d1ab0bb37cdd..257507f100ccf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringAction.java @@ -25,12 +25,16 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; +import org.opensearch.index.IndexModule; +import org.opensearch.indices.tiering.HotToWarmTieringService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Set; +import java.util.List; +import static org.opensearch.action.admin.indices.tiering.TieringUtils.constructToHotToWarmTieringResponse; +import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.indices.tiering.TieringRequestValidator.validateHotToWarm; /** @@ -44,6 +48,7 @@ public class TransportHotToWarmTieringAction extends TransportClusterManagerNode private static final Logger logger = LogManager.getLogger(TransportHotToWarmTieringAction.class); private final ClusterInfoService clusterInfoService; private final DiskThresholdSettings diskThresholdSettings; + private final HotToWarmTieringService hotToWarmTieringService; @Inject public TransportHotToWarmTieringAction( @@ -53,7 +58,8 @@ public TransportHotToWarmTieringAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterInfoService clusterInfoService, - Settings settings + Settings settings, + HotToWarmTieringService hotToWarmTieringService ) { super( HotToWarmTieringAction.NAME, @@ -66,6 +72,7 @@ public TransportHotToWarmTieringAction( ); this.clusterInfoService = clusterInfoService; this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterService.getClusterSettings()); + this.hotToWarmTieringService = hotToWarmTieringService; } @Override @@ -90,21 +97,34 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) throws Exception { - Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + Index[] concreteIndices = indexNameExpressionResolver.concreteIndicesInTier(state, request, IndexModule.TieringState.HOT); if (concreteIndices == null || concreteIndices.length == 0) { + logger.info( + "[HotToWarmTiering] No hot concrete indices resolved for the indices {} in the request", + List.of(request.indices()) + ); listener.onResponse(new HotToWarmTieringResponse(true)); return; } final TieringValidationResult tieringValidationResult = validateHotToWarm( state, - Set.of(concreteIndices), + newHashSet(concreteIndices), clusterInfoService.getClusterInfo(), diskThresholdSettings ); if (tieringValidationResult.getAcceptedIndices().isEmpty()) { - listener.onResponse(tieringValidationResult.constructResponse()); + listener.onResponse(constructToHotToWarmTieringResponse(tieringValidationResult.getRejectedIndices())); return; } + + final TieringUpdateClusterStateRequest updateClusterStateRequest = new 
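/*
 * At this point the action has (1) resolved the request to concrete indices still in
 * the HOT tier, (2) validated them against remote-store, health, open-state and disk
 * checks, and (3) short-circuited if nothing was accepted. The accepted set is now
 * packaged into a cluster state update request and handed off to the
 * HotToWarmTieringService, which owns the actual metadata flip and shard relocation.
 */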
TieringUpdateClusterStateRequest( + tieringValidationResult.getRejectedIndices(), + request.waitForCompletion() + ).ackTimeout(request.timeout()) + .masterNodeTimeout(request.clusterManagerNodeTimeout()) + .indices(tieringValidationResult.getAcceptedIndices().toArray(Index.EMPTY_ARRAY)); + + hotToWarmTieringService.tier(updateClusterStateRequest, listener); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index df0d2609ad83d..3199aaefbf5d2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -95,6 +95,7 @@ import static org.opensearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; +import static org.opensearch.index.IndexModule.INDEX_TIERING_STATE; /** * Index metadata information @@ -638,6 +639,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { static final String KEY_SYSTEM = "system"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store"; + public static final String TIERING_CUSTOM_KEY = "tiering"; public static final String TRANSLOG_METADATA_KEY = "translog_metadata"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; @@ -687,6 +689,8 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final boolean isSystem; private final boolean isRemoteSnapshot; + private final IndexModule.TieringState tieringState; + private final int indexTotalShardsPerNodeLimit; private IndexMetadata( @@ -750,6 +754,7 @@ private IndexMetadata( this.rolloverInfos = Collections.unmodifiableMap(rolloverInfos); this.isSystem = isSystem; this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); + this.tieringState = IndexModule.TieringState.valueOf(INDEX_TIERING_STATE.get(settings)); this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -1220,6 +1225,14 @@ public boolean isRemoteSnapshot() { return isRemoteSnapshot; } + public boolean isHotIndex() { + return IndexModule.TieringState.HOT.equals(tieringState); + } + + public boolean isIndexInTier(IndexModule.TieringState tieringState) { + return this.tieringState == tieringState; + } + public static Builder builder(String index) { return new Builder(index); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index 24ff83d638d4b..75b21983ec1a5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; @@ -50,6 +51,7 @@ import org.opensearch.core.common.Strings; import 
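/*
 * The tiering-state accessors added to IndexMetadata above feed the resolver change
 * below. A minimal usage sketch, using only names introduced in this diff:
 *
 *   Index[] hotIndices = indexNameExpressionResolver.concreteIndicesInTier(
 *       state, request, IndexModule.TieringState.HOT
 *   );
 *
 * Only indices whose INDEX_TIERING_STATE setting matches the requested state are
 * returned; all other resolved indices are filtered out.
 */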
org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.InvalidIndexNameException; @@ -120,6 +122,35 @@ public String[] concreteIndexNamesWithSystemIndexAccess(ClusterState state, Indi return concreteIndexNames(context, request.indices()); } + /** + * Returns the concrete indices that match the provided tiering state. + * + * @param state cluster state + * @param request indices request + * @param tieringState the tiering state of indices + * @return array of concrete indices resolved having the provided tiering state + */ + @ExperimentalApi + public Index[] concreteIndicesInTier(ClusterState state, IndicesRequest request, IndexModule.TieringState tieringState) { + Context context = new Context( + state, + request.indicesOptions(), + false, + false, + request.includeDataStreams(), + isSystemIndexAccessAllowed() + ); + final Index[] concreteIndices = concreteIndices(context, request.indices()); + final Set indicesWithTargetTier = new HashSet<>(); + for (Index index : concreteIndices) { + IndexMetadata indexMetadata = state.metadata().getIndexSafe(index); + if (indexMetadata.isIndexInTier(tieringState)) { + indicesWithTargetTier.add(index); + } + } + return indicesWithTargetTier.toArray(Index.EMPTY_ARRAY); + } + /** * Same as {@link #concreteIndices(ClusterState, IndicesOptions, String...)}, but the index expressions and options * are encapsulated in the specified request and resolves data streams. diff --git a/server/src/main/java/org/opensearch/indices/tiering/HotToWarmTieringService.java b/server/src/main/java/org/opensearch/indices/tiering/HotToWarmTieringService.java new file mode 100644 index 0000000000000..854a039e2756e --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/tiering/HotToWarmTieringService.java @@ -0,0 +1,348 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.tiering; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.opensearch.action.admin.indices.tiering.HotToWarmTieringResponse; +import org.opensearch.action.admin.indices.tiering.TieringRequestContext; +import org.opensearch.action.admin.indices.tiering.TieringUpdateClusterStateRequest; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Priority; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexModule; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static org.opensearch.action.admin.indices.tiering.TieringUtils.constructToHotToWarmTieringResponse; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.TIERING_CUSTOM_KEY; +import static org.opensearch.index.IndexModule.INDEX_STORE_LOCALITY_SETTING; +import static org.opensearch.index.IndexModule.INDEX_TIERING_STATE; + +/** + * Service responsible for tiering indices from hot to warm + * @opensearch.experimental + */ +@ExperimentalApi +public class HotToWarmTieringService extends AbstractLifecycleComponent implements ClusterStateListener { + private static final Logger logger = LogManager.getLogger(HotToWarmTieringService.class); + private final ClusterService clusterService; + private final IndexNameExpressionResolver indexNameExpressionResolver; + private final AllocationService allocationService; + private final Set tieringRequestContexts = ConcurrentHashMap.newKeySet(); + static final String TIERING_START_TIME = "start_time"; + + @Inject + public HotToWarmTieringService( + Settings settings, + ClusterService clusterService, + IndexNameExpressionResolver indexNameExpressionResolver, + AllocationService allocationService + ) { + this.clusterService = clusterService; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.allocationService = allocationService; + + if (DiscoveryNode.isClusterManagerNode(settings) && FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) { + clusterService.addListener(this); + } + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + // TODO: https://github.com/opensearch-project/OpenSearch/issues/14981 + if 
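/*
 * Listener flow: on every routing table change, re-evaluate all pending tiering
 * requests. An index counts as tiered once each of its shards is STARTED on a
 * search node; fully processed requests are removed and their listeners notified.
 */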
(event.routingTableChanged()) { + if (!tieringRequestContexts.isEmpty()) { + processTieringRequestContexts(event.state()); + } + } + } + + void processTieringRequestContexts(final ClusterState clusterState) { + final Map tieredIndices = new HashMap<>(); + for (TieringRequestContext tieringRequestContext : tieringRequestContexts) { + if (tieringRequestContext.isRequestProcessingComplete()) { + logger.info("[HotToWarmTiering] Tiering is completed for the request [{}]", tieringRequestContext); + completeRequestLevelTiering(tieringRequestContext); + continue; + } + List shardRoutings; + for (Index index : tieringRequestContext.getInProgressIndices()) { + if (clusterState.routingTable().hasIndex(index)) { + // Ensure the index has not been deleted + shardRoutings = clusterState.routingTable().allShards(index.getName()); + } else { + // Index was already deleted; nothing to do + logger.warn("[HotToWarmTiering] Index [{}] deleted before shard relocation finished", index.getName()); + tieringRequestContext.addToFailed(index, "index not found"); + continue; + } + + boolean relocationCompleted = true; + for (ShardRouting shard : shardRoutings) { + if (!isShardInWarmTier(shard, clusterState)) { + relocationCompleted = false; + break; + } + } + if (relocationCompleted) { + logger.debug("[HotToWarmTiering] Shard relocation completed for index [{}]", index.getName()); + tieringRequestContext.addToTiered(index); + tieredIndices.put(index, tieringRequestContext); + } + } + } + if (!tieredIndices.isEmpty()) { + updateClusterStateForTieredIndices(tieredIndices); + } + } + + /** + * Checks whether the shard is in the warm tier. + * @param shard shard routing + * @param clusterState current cluster state + * @return true if the shard is started on a search node, false otherwise + */ + boolean isShardInWarmTier(final ShardRouting shard, final ClusterState clusterState) { + if (shard.unassigned()) { + return false; + } + final boolean isShardFoundOnSearchNode = clusterState.getNodes().get(shard.currentNodeId()).isSearchNode(); + return shard.started() && isShardFoundOnSearchNode; + } + + /** + * Completes the request-level tiering for the given request context. + * @param requestContext tiering request context + */ + void completeRequestLevelTiering(TieringRequestContext requestContext) { + tieringRequestContexts.remove(requestContext); + if (requestContext.getListener() != null) { + requestContext.getListener().onResponse(constructToHotToWarmTieringResponse(requestContext.getFailedIndices())); + } + } + + /** + * Updates the request context for tiered indices: moves each tiered index to the completed state + * and completes the request-level tiering once all indices of a request are done. + * @param tieredIndices map of tiered indices and their request contexts + */ + void updateRequestContextForTieredIndices(final Map tieredIndices) { + for (Map.Entry entry : tieredIndices.entrySet()) { + Index index = entry.getKey(); + TieringRequestContext tieringRequestContext = entry.getValue(); + tieringRequestContext.addToCompleted(index); + if (tieringRequestContext.isRequestProcessingComplete()) { + logger.info("[HotToWarmTiering] Tiering is completed for the request [{}]", tieringRequestContext); + completeRequestLevelTiering(tieringRequestContext); + } + } + } + + /** + * Updates the index metadata with the tiering settings/metadata for an accepted index. + * An accepted index is one that is about to be tiered from hot to warm. 
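+ * Concretely, the body below applies the following updates:
+ *   - INDEX_STORE_LOCALITY_SETTING is set to DataLocalityType.PARTIAL
+ *   - INDEX_TIERING_STATE is set to TieringState.HOT_TO_WARM
+ *   - number_of_replicas is capped at 1
+ *   - a TIERING_CUSTOM_KEY custom metadata entry records the tiering start time
+ *   - the index settings version is bumped by one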
+ * @param metadataBuilder metadata builder + * @param routingTableBuilder routing table builder + * @param indexMetadata index metadata + * @param index the index being tiered + */ + void updateIndexMetadataForAcceptedIndex( + final Metadata.Builder metadataBuilder, + final RoutingTable.Builder routingTableBuilder, + final IndexMetadata indexMetadata, + final Index index + ) { + Settings.Builder indexSettingsBuilder = Settings.builder().put(indexMetadata.getSettings()); + // switch the index to partial data locality and mark the tiering as in progress + indexSettingsBuilder.put(INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL); + indexSettingsBuilder.put(INDEX_TIERING_STATE.getKey(), IndexModule.TieringState.HOT_TO_WARM); + + // Update number of replicas to 1 in case the number of replicas is greater than 1 + if (Integer.parseInt(indexMetadata.getSettings().get(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey())) > 1) { + final String[] indices = new String[] { index.getName() }; + routingTableBuilder.updateNumberOfReplicas(1, indices); + metadataBuilder.updateNumberOfReplicas(1, indices); + } + // record the transient tiering metadata (the start time) in the custom index metadata + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata).settings(indexSettingsBuilder); + final Map tieringCustomData = new HashMap<>(); + tieringCustomData.put(TIERING_START_TIME, String.valueOf(System.currentTimeMillis())); + indexMetadataBuilder.putCustom(TIERING_CUSTOM_KEY, tieringCustomData); + // Update index settings version + indexMetadataBuilder.settingsVersion(1 + indexMetadataBuilder.settingsVersion()); + metadataBuilder.put(indexMetadataBuilder); + } + + /** + * Updates the cluster state by updating the index metadata for tiered indices. + * @param tieredIndices map of tiered indices and their request contexts + */ + void updateClusterStateForTieredIndices(final Map tieredIndices) { + clusterService.submitStateUpdateTask( + "complete hot to warm tiering for tiered indices: " + tieredIndices.keySet(), + new ClusterStateUpdateTask(Priority.NORMAL) { + + @Override + public ClusterState execute(ClusterState currentState) { + final Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); + for (Map.Entry entry : tieredIndices.entrySet()) { + Index index = entry.getKey(); + final IndexMetadata indexMetadata = currentState.metadata().index(index); + if (indexMetadata == null) { + entry.getValue().addToFailed(index, "index not found"); + continue; + } + updateIndexMetadataForTieredIndex(metadataBuilder, indexMetadata); + } + return ClusterState.builder(currentState).metadata(metadataBuilder).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "[HotToWarmTiering] failed to complete tiering for tiered indices " + "[{}]", + tieredIndices + ), + e + ); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + logger.info("[HotToWarmTiering] Cluster state updated for source " + source); + updateRequestContextForTieredIndices(tieredIndices); + } + } + ); + } + + /** + * Updates the index metadata with the tiering settings/metadata for a tiered index. 
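+ * This runs once every shard of the index is started on a search node, and applies:
+ *   - INDEX_TIERING_STATE is set to TieringState.WARM
+ *   - the transient TIERING_CUSTOM_KEY metadata is removed
+ *   - the index settings version is bumped by one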
+ * @param metadataBuilder metadata builder + * @param indexMetadata index metadata + */ + void updateIndexMetadataForTieredIndex(final Metadata.Builder metadataBuilder, final IndexMetadata indexMetadata) { + Settings.Builder indexSettingsBuilder = Settings.builder().put(indexMetadata.getSettings()); + // mark the tiering as finished by flipping the tiering state to WARM + indexSettingsBuilder.put(INDEX_TIERING_STATE.getKey(), IndexModule.TieringState.WARM); + // drop the transient tiering metadata from the custom index metadata + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata).settings(indexSettingsBuilder); + indexMetadataBuilder.removeCustom(TIERING_CUSTOM_KEY); + + // Update index settings version + indexMetadataBuilder.settingsVersion(1 + indexMetadataBuilder.settingsVersion()); + metadataBuilder.put(indexMetadataBuilder); + } + + /** + * Tiers the given indices from hot to warm. + * @param request tiering update cluster state request + * @param listener callback listener + */ + public void tier(final TieringUpdateClusterStateRequest request, final ActionListener listener) { + final Set indices = Set.of(request.indices()); + final TieringRequestContext tieringRequestContext = new TieringRequestContext( + request.waitForCompletion() ? listener : null, + indices, + request.getRejectedIndices() + ); + + logger.info("[HotToWarmTiering] Starting hot to warm tiering for indices {}", indices); + clusterService.submitStateUpdateTask("start hot to warm tiering: " + indices, new ClusterStateUpdateTask(Priority.URGENT) { + + @Override + public ClusterState execute(ClusterState currentState) { + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + final Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); + for (Index index : tieringRequestContext.getInProgressIndices()) { + final IndexMetadata indexMetadata = currentState.metadata().index(index); + if (indexMetadata == null) { + tieringRequestContext.addToFailed(index, "index not found"); + continue; + } else if (!indexMetadata.isHotIndex()) { + tieringRequestContext.addToFailed(index, "index is not in the HOT tier"); + continue; + } + updateIndexMetadataForAcceptedIndex(metadataBuilder, routingTableBuilder, indexMetadata, index); + } + ClusterState updatedState = ClusterState.builder(currentState) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder.build()) + .build(); + + // now, reroute to trigger shard relocation + updatedState = allocationService.reroute(updatedState, "hot to warm tiering"); + + return updatedState; + } + + @Override + public void onFailure(String source, Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage("[HotToWarmTiering] failed tiering for indices " + "[{}]", indices), + e + ); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + logger.info("[HotToWarmTiering] Cluster state updated for source " + source); + tieringRequestContexts.add(tieringRequestContext); + if (!request.waitForCompletion()) { + listener.onResponse(constructToHotToWarmTieringResponse(tieringRequestContext.getFailedIndices())); + } + } + + @Override + public TimeValue timeout() { + return request.clusterManagerNodeTimeout(); + } + }); + } + + @Override + protected void doStart() {} + + @Override + protected void doStop() {} + + @Override + protected void doClose() throws IOException {} + +} diff --git 
a/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java b/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java index 2de50f4d4295d..0b81393d21bd4 100644 --- a/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java +++ b/server/src/main/java/org/opensearch/indices/tiering/TieringRequestValidator.java @@ -22,7 +22,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexModule; import java.util.HashMap; import java.util.List; @@ -30,8 +29,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.index.IndexModule.INDEX_TIERING_STATE; - /** * Validator class to validate the tiering requests of the index * @opensearch.experimental @@ -62,10 +59,6 @@ public static TieringValidationResult validateHotToWarm( final TieringValidationResult tieringValidationResult = new TieringValidationResult(concreteIndices); for (Index index : concreteIndices) { - if (!validateHotIndex(currentState, index)) { - tieringValidationResult.addToRejected(index, "index is not in the HOT tier"); - continue; - } if (!validateRemoteStoreIndex(currentState, index)) { tieringValidationResult.addToRejected(index, "index is not backed up by the remote store"); continue; @@ -82,7 +75,7 @@ public static TieringValidationResult validateHotToWarm( validateEligibleNodesCapacity(clusterInfo, currentState, tieringValidationResult); logger.info( - "Successfully accepted indices for tiering are [{}], rejected indices are [{}]", + "[HotToWarmTiering] Successfully accepted indices for tiering are [{}], rejected indices are [{}]", tieringValidationResult.getAcceptedIndices(), tieringValidationResult.getRejectedIndices() ); @@ -119,17 +112,6 @@ static boolean validateRemoteStoreIndex(final ClusterState state, final Index in return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(state.metadata().getIndexSafe(index).getSettings()); } - /** - * Validates that the specified index is in the "hot" tiering state. - * - * @param state the current cluster state - * @param index the index to be validated - * @return true if the index is in the "hot" tiering state, false otherwise - */ - static boolean validateHotIndex(final ClusterState state, final Index index) { - return IndexModule.TieringState.HOT.name().equals(INDEX_TIERING_STATE.get(state.metadata().getIndexSafe(index).getSettings())); - } - /** * Validates the health of the specified index in the current cluster state. 
* @@ -172,7 +154,7 @@ static void validateDiskThresholdWaterMarkNotBreached( ) { final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); if (usages == null) { - logger.trace("skipping monitor as no disk usage information is available"); + logger.trace("[Tiering] skipping monitor as no disk usage information is available"); return; } final Set nodeIds = getEligibleNodes(currentState).stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index cbed8dfea8cc4..5ba3965323396 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -179,6 +179,7 @@ import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.store.IndicesStore; +import org.opensearch.indices.tiering.HotToWarmTieringService; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.monitor.fs.FsHealthService; @@ -1207,6 +1208,13 @@ protected Node( remoteClusterStateService ); + final HotToWarmTieringService hotToWarmTieringService = new HotToWarmTieringService( + settings, + clusterService, + clusterModule.getIndexNameExpressionResolver(), + clusterModule.getAllocationService() + ); + final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( settings, clusterService::state, @@ -1403,6 +1411,9 @@ protected Node( b.bind(TransportNodesSnapshotsStatus.class).toInstance(nodesSnapshotsStatus); b.bind(RestoreService.class).toInstance(restoreService); b.bind(RemoteStoreRestoreService.class).toInstance(remoteStoreRestoreService); + if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) { + b.bind(HotToWarmTieringService.class).toInstance(hotToWarmTieringService); + } b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java index 10273366af804..c8880112f8c8e 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/tiering/TransportHotToWarmTieringActionTests.java @@ -109,7 +109,7 @@ public void testNoConcreteIndices() { public void testNoAcceptedIndices() { TieringIndexRequest request = new TieringIndexRequest(TARGET_TIER, "test-idx-*", "idx-*"); HotToWarmTieringResponse response = client().admin().indices().execute(HotToWarmTieringAction.INSTANCE, request).actionGet(); - assertFalse(response.isAcknowledged()); + assertTrue(response.isAcknowledged()); assertEquals(2, response.getFailedIndices().size()); for (HotToWarmTieringResponse.IndexResult result : response.getFailedIndices()) { assertEquals("index is not backed up by the remote store", result.getFailureReason()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java index fda2f411b1994..316c0bee1e614 100644 --- 
a/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -49,6 +49,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndexClosedException; @@ -70,6 +71,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; import static org.opensearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY; import static org.opensearch.common.util.set.Sets.newHashSet; +import static org.opensearch.index.IndexModule.INDEX_TIERING_STATE; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.arrayWithSize; @@ -891,6 +893,44 @@ public void testConcreteIndicesNoIndicesErrorMessageNoExpand() { assertThat(infe.getMessage(), is("no such index [_all] and no indices exist")); } + public void testConcreteIndicesWithHotTier() { + Metadata.Builder mdBuilder = Metadata.builder() + .put( + indexBuilder("test-hot", Settings.builder().put(INDEX_TIERING_STATE.getKey(), IndexModule.TieringState.HOT.name()).build()) + .state(State.OPEN) + ) + .put( + indexBuilder( + "test-warm", + Settings.builder().put(INDEX_TIERING_STATE.getKey(), IndexModule.TieringState.WARM.name()).build() + ).state(State.OPEN) + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); + SearchRequest request = new SearchRequest("test*"); + Index[] indices = indexNameExpressionResolver.concreteIndicesInTier(state, request, IndexModule.TieringState.HOT); + assertEquals(1, indices.length); + assertEquals("test-hot", indices[0].getName()); + } + + public void testConcreteIndicesWithWarmTier() { + Metadata.Builder mdBuilder = Metadata.builder() + .put( + indexBuilder("test-hot", Settings.builder().put(INDEX_TIERING_STATE.getKey(), IndexModule.TieringState.HOT.name()).build()) + .state(State.OPEN) + ) + .put( + indexBuilder( + "test-warm", + Settings.builder().put(INDEX_TIERING_STATE.getKey(), IndexModule.TieringState.WARM.name()).build() + ).state(State.OPEN) + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); + SearchRequest request = new SearchRequest("test*"); + Index[] indices = indexNameExpressionResolver.concreteIndicesInTier(state, request, IndexModule.TieringState.WARM); + assertEquals(1, indices.length); + assertEquals("test-warm", indices[0].getName()); + } + public void testConcreteIndicesWildcardExpansion() { Metadata.Builder mdBuilder = Metadata.builder() .put(indexBuilder("testXXX").state(State.OPEN)) diff --git a/server/src/test/java/org/opensearch/indices/tiering/HotToWarmTieringServiceTests.java b/server/src/test/java/org/opensearch/indices/tiering/HotToWarmTieringServiceTests.java new file mode 100644 index 0000000000000..10b81e1d6b905 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/tiering/HotToWarmTieringServiceTests.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.tiering; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexModule; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.index.IndexModule.INDEX_STORE_LOCALITY_SETTING; +import static org.opensearch.index.IndexModule.INDEX_TIERING_STATE; + +public class HotToWarmTieringServiceTests extends OpenSearchSingleNodeTestCase { + + private ClusterService clusterService; + private HotToWarmTieringService hotToWarmTieringService; + + @Before + public void beforeTest() { + clusterService = this.getInstanceFromNode(ClusterService.class); + hotToWarmTieringService = this.getInstanceFromNode(HotToWarmTieringService.class); + } + + public void testUpdateIndexMetadataForAcceptedIndices() { + String indexName = "test_index"; + createIndex(indexName); + Index index = resolveIndex(indexName); + final Metadata.Builder metadataBuilder = Metadata.builder(clusterService.state().metadata()); + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterService.state().routingTable()); + hotToWarmTieringService.updateIndexMetadataForAcceptedIndex( + metadataBuilder, + routingTableBuilder, + clusterService.state().metadata().index(index), + index + ); + IndexMetadata indexMetadata = metadataBuilder.build().index(indexName); + assertEquals( + IndexModule.DataLocalityType.PARTIAL, + IndexModule.DataLocalityType.getValueOf(indexMetadata.getSettings().get(INDEX_STORE_LOCALITY_SETTING.getKey())) + ); + assertEquals(IndexModule.TieringState.HOT_TO_WARM.name(), indexMetadata.getSettings().get(INDEX_TIERING_STATE.getKey())); + Map customData = indexMetadata.getCustomData(IndexMetadata.TIERING_CUSTOM_KEY); + assertNotNull(customData); + assertNotNull(customData.get(HotToWarmTieringService.TIERING_START_TIME)); + } + + public void testUpdateIndexMetadataForSuccessfulIndex() { + String indexName = "test_index"; + createIndex(indexName); + Index index = resolveIndex(indexName); + final Metadata.Builder metadataBuilder = Metadata.builder(clusterService.state().metadata()); + Map customData = new HashMap<>(); + customData.put(HotToWarmTieringService.TIERING_START_TIME, String.valueOf(System.currentTimeMillis())); + metadataBuilder.put(IndexMetadata.builder(metadataBuilder.getSafe(index)).putCustom(IndexMetadata.TIERING_CUSTOM_KEY, customData)); + hotToWarmTieringService.updateIndexMetadataForTieredIndex(metadataBuilder, clusterService.state().metadata().index(index)); + IndexMetadata indexMetadata = metadataBuilder.build().index(indexName); + assertEquals(IndexModule.TieringState.WARM.name(), indexMetadata.getSettings().get(INDEX_TIERING_STATE.getKey())); + customData = indexMetadata.getCustomData(IndexMetadata.TIERING_CUSTOM_KEY); + assertNull(customData); + } +} diff --git a/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java b/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java index 6b6f74353812b..d4436a2f66a34 100644 --- a/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/tiering/TieringRequestValidatorTests.java @@ -24,7 +24,6 @@ import 
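/*
 * The removals below mirror the production change in this patch: validateHotIndex is
 * gone because hot-tier filtering now happens up front in
 * IndexNameExpressionResolver#concreteIndicesInTier, so the validator (and these
 * tests) no longer re-check the tiering state of each index.
 */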
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexModule; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchTestCase; @@ -42,7 +41,6 @@ import static org.opensearch.indices.tiering.TieringRequestValidator.getTotalAvailableBytesInWarmTier; import static org.opensearch.indices.tiering.TieringRequestValidator.validateDiskThresholdWaterMarkNotBreached; import static org.opensearch.indices.tiering.TieringRequestValidator.validateEligibleNodesCapacity; -import static org.opensearch.indices.tiering.TieringRequestValidator.validateHotIndex; import static org.opensearch.indices.tiering.TieringRequestValidator.validateIndexHealth; import static org.opensearch.indices.tiering.TieringRequestValidator.validateOpenIndex; import static org.opensearch.indices.tiering.TieringRequestValidator.validateRemoteStoreIndex; @@ -92,26 +90,6 @@ public void testDocRepIndex() { assertFalse(validateRemoteStoreIndex(buildClusterState(indexName, indexUuid, Settings.EMPTY), new Index(indexName, indexUuid))); } - public void testValidHotIndex() { - String indexUuid = UUID.randomUUID().toString(); - String indexName = "test_index"; - assertTrue(validateHotIndex(buildClusterState(indexName, indexUuid, Settings.EMPTY), new Index(indexName, indexUuid))); - } - - public void testIndexWithOngoingOrCompletedTiering() { - String indexUuid = UUID.randomUUID().toString(); - String indexName = "test_index"; - - IndexModule.TieringState tieringState = randomBoolean() ? IndexModule.TieringState.HOT_TO_WARM : IndexModule.TieringState.WARM; - - ClusterState clusterState = buildClusterState( - indexName, - indexUuid, - Settings.builder().put(IndexModule.INDEX_TIERING_STATE.getKey(), tieringState).build() - ); - assertFalse(validateHotIndex(clusterState, new Index(indexName, indexUuid))); - } - public void testValidateIndexHealth() { String indexUuid = UUID.randomUUID().toString(); String indexName = "test_index";
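/*
 * Taken together, the remaining validator checks compose roughly as follows (a
 * sketch inferred from the static imports above, not a verbatim listing):
 *
 *   per index:     validateRemoteStoreIndex -> validateIndexHealth -> validateOpenIndex
 *   across nodes:  validateDiskThresholdWaterMarkNotBreached and
 *                  validateEligibleNodesCapacity (via getTotalAvailableBytesInWarmTier)
 */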