From 1945119011a58ee6affe81bae8f1504bfcf72e95 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Tue, 12 Jul 2022 09:27:35 -0700
Subject: [PATCH 001/104] Mute testHighWatermarkNotExceeded (#3844)

Signed-off-by: Suraj Singh
---
 .../routing/allocation/decider/DiskThresholdDeciderIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
index b5d260f5c3314..e26d5be03cf7a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
@@ -153,6 +153,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singletonList(InternalSettingsPlugin.class);
     }

+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/pull/3561")
     public void testHighWatermarkNotExceeded() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
         internalCluster().startDataOnlyNode();

From 9af340760557ad84a91192f7f7d3b1d6b51e5577 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?=
Date: Tue, 12 Jul 2022 20:55:48 +0200
Subject: [PATCH 002/104] Upgrade MinIO image version (#3541)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On top of that, address MinIO warnings:
- set up ulimits
- use a static console port
- do not use deprecated argument names

With regard to static ports, this is related to a change MinIO introduced in
RELEASE.2021-07-08T01-15-01Z, where the console was embedded into the server
itself. For more details visit:
https://docs.min.io/minio/baremetal/console/minio-console.html

We could let the port be assigned dynamically; however, setting it statically
gives users the option to use the console web UI for MinIO troubleshooting
(though I think the ports would still need to be exposed in that case).

Closes #3539

Signed-off-by: Lukáš Vlček
---
 test/fixtures/minio-fixture/Dockerfile        |  6 ++--
 .../fixtures/minio-fixture/docker-compose.yml | 36 ++++++++++++-------
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/test/fixtures/minio-fixture/Dockerfile b/test/fixtures/minio-fixture/Dockerfile
index 54bb562c790c1..b56440c0d44a9 100644
--- a/test/fixtures/minio-fixture/Dockerfile
+++ b/test/fixtures/minio-fixture/Dockerfile
@@ -1,9 +1,9 @@
-FROM minio/minio:RELEASE.2019-01-23T23-18-58Z
+FROM minio/minio:RELEASE.2022-06-25T15-50-16Z

 ARG bucket
 ARG accessKey
 ARG secretKey

 RUN mkdir -p /minio/data/${bucket}
-ENV MINIO_ACCESS_KEY=${accessKey}
-ENV MINIO_SECRET_KEY=${secretKey}
+ENV MINIO_ROOT_USER=${accessKey}
+ENV MINIO_ROOT_PASSWORD=${secretKey}

diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml
index 7b66f04152b47..4c0245772ed4c 100644
--- a/test/fixtures/minio-fixture/docker-compose.yml
+++ b/test/fixtures/minio-fixture/docker-compose.yml
@@ -8,9 +8,13 @@ services:
         accessKey: "access_key"
         secretKey: "secret_key"
       dockerfile: Dockerfile
+    ulimits:
+      nofile:
+        hard: 4096
+        soft: 4096
     ports:
     - "9000"
-    command: ["server", "/minio/data"]
+    command: ["server", "--console-address", ":9001", "/minio/data"]
   minio-fixture-other:
     build:
       context: .
@@ -19,17 +23,25 @@ services:
         accessKey: "access_key"
         secretKey: "secret_key"
       dockerfile: Dockerfile
+    ulimits:
+      nofile:
+        hard: 4096
+        soft: 4096
     ports:
     - "9000"
-    command: ["server", "/minio/data"]
+    command: ["server", "--console-address", ":9001", "/minio/data"]
   minio-fixture-for-snapshot-tool:
-    build:
-      context: .
-      args:
-        bucket: "bucket"
-        accessKey: "sn_tool_access_key"
-        secretKey: "sn_tool_secret_key"
-      dockerfile: Dockerfile
-    ports:
-      - "9000"
-    command: ["server", "/minio/data"]
+    build:
+      context: .
+      args:
+        bucket: "bucket"
+        accessKey: "sn_tool_access_key"
+        secretKey: "sn_tool_secret_key"
+      dockerfile: Dockerfile
+    ulimits:
+      nofile:
+        hard: 4096
+        soft: 4096
+    ports:
+      - "9000"
+    command: ["server", "--console-address", ":9001", "/minio/data"]

From a7e113a67a9d8c4dd7298956174ff2f666a2c2b7 Mon Sep 17 00:00:00 2001
From: Tianli Feng
Date: Tue, 12 Jul 2022 14:43:17 -0700
Subject: [PATCH 003/104] Rename public classes with 'Master' to 'ClusterManager' (#3619)

Replace master terminology with cluster manager in the public Java APIs to
support inclusive language. The PR deals with the public class names in the
repository (except for those covered in issue #3542).

* Rename Master to ClusterManager for all the classes, including all
references to the classes. The next PR will be like
https://github.com/opensearch-project/OpenSearch/commit/de21446b0296665c15b0db0b106e1e1be20a08d2,
adding back the classes under their old names for backwards compatibility.

List of classes renamed in this PR:

server directory:
interface LocalNodeMasterListener -> LocalNodeClusterManagerListener
final class MasterNodeChangePredicate -> ClusterManagerNodeChangePredicate
NotMasterException -> NotClusterManagerException
NoMasterBlockService -> NoClusterManagerBlockService
UnsafeBootstrapMasterCommand -> UnsafeBootstrapClusterManagerCommand
MasterService -> ClusterManagerService
MasterNotDiscoveredException -> ClusterManagerNotDiscoveredException
RestMasterAction -> RestClusterManagerAction

test/framework directory:
FakeThreadPoolMasterService -> FakeThreadPoolClusterManagerService
BlockMasterServiceOnMaster -> BlockClusterManagerServiceOnClusterManager
BusyMasterServiceDisruption -> BusyClusterManagerServiceDisruption

Signed-off-by: Tianli Feng
---
 .../index/rankeval/RankEvalResponseTests.java | 4 +-
 ...ansportClusterStateActionDisruptionIT.java | 6 +-
 .../admin/indices/exists/IndicesExistsIT.java | 4 +-
 .../cluster/ClusterStateDiffIT.java | 6 +-
 .../cluster/MinimumClusterManagerNodesIT.java | 26 +++---
 .../cluster/NoClusterManagerNodeIT.java | 26 +++---
 .../SpecificClusterManagerNodesIT.java | 8 +-
 .../UnsafeBootstrapAndDetachCommandIT.java | 35 +++++---
 .../discovery/ClusterManagerDisruptionIT.java | 19 ++--
 .../DedicatedClusterSnapshotRestoreIT.java | 4 +-
 .../main/java/org/opensearch/Assertions.java | 2 +-
 .../org/opensearch/OpenSearchException.java | 13 ++-
 .../org/opensearch/action/ActionModule.java | 4 +-
 .../health/TransportClusterHealthAction.java | 7 +-
 .../state/TransportClusterStateAction.java | 4 +-
 .../TransportClusterManagerNodeAction.java | 14 +--
 ...=> ClusterManagerNodeChangePredicate.java} | 4 +-
 .../cluster/ClusterStateTaskListener.java | 6 +-
 ...a => LocalNodeClusterManagerListener.java} | 2 +-
 ...n.java => NotClusterManagerException.java} | 6 +-
 .../action/shard/ShardStateAction.java | 12 +--
 .../cluster/coordination/Coordinator.java | 14 +--
 .../cluster/coordination/JoinHelper.java | 21 ++---
 .../coordination/JoinTaskExecutor.java | 8 +-
 ...java => 
NoClusterManagerBlockService.java} | 8 +- .../cluster/coordination/NodeToolCli.java | 4 +- ...UnsafeBootstrapClusterManagerCommand.java} | 4 +- .../routing/BatchedRerouteService.java | 9 +- .../service/ClusterApplierService.java | 4 +- ...ervice.java => ClusterManagerService.java} | 8 +- .../cluster/service/ClusterService.java | 26 +++--- .../common/settings/ClusterSettings.java | 12 +-- .../settings/ConsistentSettingsService.java | 8 +- .../common/util/concurrent/BaseFuture.java | 4 +- ...ClusterManagerNotDiscoveredException.java} | 10 +-- .../opensearch/discovery/DiscoveryModule.java | 4 +- .../gateway/LocalAllocateDangledIndices.java | 4 +- .../PersistentTasksClusterService.java | 4 +- ...ion.java => RestClusterManagerAction.java} | 4 +- .../snapshots/SnapshotsService.java | 10 +-- .../ExceptionSerializationTests.java | 12 +-- .../opensearch/OpenSearchExceptionTests.java | 6 +- .../RenamedTimeoutRequestParameterTests.java | 4 +- ...TransportResyncReplicationActionTests.java | 4 +- ...ransportClusterManagerNodeActionTests.java | 10 +-- ...rnalClusterInfoServiceSchedulingTests.java | 10 +-- .../action/shard/ShardStateActionTests.java | 4 +- .../coordination/CoordinatorTests.java | 8 +- .../cluster/coordination/JoinHelperTests.java | 4 +- ...nagerBlockServiceRenamedSettingTests.java} | 38 +++++--- ...=> NoClusterManagerBlockServiceTests.java} | 14 +-- .../cluster/coordination/NodeJoinTests.java | 24 ++--- .../service/ClusterApplierServiceTests.java | 8 +- ...terManagerServiceRenamedSettingTests.java} | 26 +++--- ...s.java => ClusterManagerServiceTests.java} | 90 ++++++++++--------- .../discovery/DiscoveryModuleTests.java | 6 +- ...ClusterStateServiceRandomUpdatesTests.java | 14 +-- .../snapshots/SnapshotResiliencyTests.java | 8 +- .../AbstractCoordinatorTestCase.java | 6 +- ... FakeThreadPoolClusterManagerService.java} | 8 +- .../opensearch/test/ClusterServiceUtils.java | 10 +-- .../opensearch/test/InternalTestCluster.java | 4 +- .../org/opensearch/test/RandomObjects.java | 4 +- ...lusterManagerServiceOnClusterManager.java} | 4 +- ... 
BusyClusterManagerServiceDisruption.java} | 4 +- ...ThreadPoolClusterManagerServiceTests.java} | 4 +- 66 files changed, 374 insertions(+), 338 deletions(-) rename server/src/main/java/org/opensearch/cluster/{MasterNodeChangePredicate.java => ClusterManagerNodeChangePredicate.java} (95%) rename server/src/main/java/org/opensearch/cluster/{LocalNodeMasterListener.java => LocalNodeClusterManagerListener.java} (96%) rename server/src/main/java/org/opensearch/cluster/{NotMasterException.java => NotClusterManagerException.java} (89%) rename server/src/main/java/org/opensearch/cluster/coordination/{NoMasterBlockService.java => NoClusterManagerBlockService.java} (93%) rename server/src/main/java/org/opensearch/cluster/coordination/{UnsafeBootstrapMasterCommand.java => UnsafeBootstrapClusterManagerCommand.java} (98%) rename server/src/main/java/org/opensearch/cluster/service/{MasterService.java => ClusterManagerService.java} (99%) rename server/src/main/java/org/opensearch/discovery/{MasterNotDiscoveredException.java => ClusterManagerNotDiscoveredException.java} (82%) rename server/src/main/java/org/opensearch/rest/action/cat/{RestMasterAction.java => RestClusterManagerAction.java} (97%) rename server/src/test/java/org/opensearch/cluster/coordination/{NoMasterBlockServiceRenamedSettingTests.java => NoClusterManagerBlockServiceRenamedSettingTests.java} (62%) rename server/src/test/java/org/opensearch/cluster/coordination/{NoMasterBlockServiceTests.java => NoClusterManagerBlockServiceTests.java} (85%) rename server/src/test/java/org/opensearch/cluster/service/{MasterServiceRenamedSettingTests.java => ClusterManagerServiceRenamedSettingTests.java} (68%) rename server/src/test/java/org/opensearch/cluster/service/{MasterServiceTests.java => ClusterManagerServiceTests.java} (92%) rename test/framework/src/main/java/org/opensearch/cluster/service/{FakeThreadPoolMasterService.java => FakeThreadPoolClusterManagerService.java} (96%) rename test/framework/src/main/java/org/opensearch/test/disruption/{BlockMasterServiceOnMaster.java => BlockClusterManagerServiceOnClusterManager.java} (96%) rename test/framework/src/main/java/org/opensearch/test/disruption/{BusyMasterServiceDisruption.java => BusyClusterManagerServiceDisruption.java} (95%) rename test/framework/src/test/java/org/opensearch/cluster/service/{FakeThreadPoolMasterServiceTests.java => FakeThreadPoolClusterManagerServiceTests.java} (97%) diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java index 3d883b373d705..463df690be763 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java @@ -37,7 +37,7 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.common.ParsingException; import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.breaker.CircuitBreakingException; @@ -76,7 +76,7 @@ public class RankEvalResponseTests extends OpenSearchTestCase { private static final Exception[] RANDOM_EXCEPTIONS = new Exception[] { - new 
ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)), + new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)), new CircuitBreakingException("Data too large", 123, 456, CircuitBreaker.Durability.PERMANENT), new SearchParseException(SHARD_TARGET, "Parse failure", new XContentLocation(12, 98)), new IllegalArgumentException("Closed resource", new RuntimeException("Resource")), diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index 8720de848bf14..2dfe784e47350 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -40,7 +40,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -79,7 +79,7 @@ public void testNonLocalRequestAlwaysFindsClusterManager() throws Exception { final ClusterStateResponse clusterStateResponse; try { clusterStateResponse = clusterStateRequestBuilder.get(); - } catch (MasterNotDiscoveredException e) { + } catch (ClusterManagerNotDiscoveredException e) { return; // ok, we hit the disconnected node } assertNotNull("should always contain a cluster-manager node", clusterStateResponse.getState().nodes().getMasterNodeId()); @@ -129,7 +129,7 @@ public void testNonLocalRequestAlwaysFindsClusterManagerAndWaitsForMetadata() th final ClusterStateResponse clusterStateResponse; try { clusterStateResponse = clusterStateRequestBuilder.get(); - } catch (MasterNotDiscoveredException e) { + } catch (ClusterManagerNotDiscoveredException e) { return; // ok, we hit the disconnected node } if (clusterStateResponse.isWaitForTimedOut() == false) { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java index 7aade84fdef7f..f2db023a2ac01 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java @@ -34,7 +34,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.gateway.GatewayService; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; @@ -54,7 +54,7 @@ public void testIndexExistsWithBlocksInPlace() throws IOException { assertRequestBuilderThrows( client(node).admin().indices().prepareExists("test").setClusterManagerNodeTimeout(TimeValue.timeValueSeconds(0)), - MasterNotDiscoveredException.class + ClusterManagerNotDiscoveredException.class ); 
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); // shut down node so that test properly cleans up diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java index 8b510c6a13829..52c2cf9bfdcd0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java @@ -37,7 +37,7 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexGraveyardTests; @@ -396,9 +396,9 @@ private ClusterState.Builder randomBlocks(ClusterState clusterState) { private ClusterBlock randomGlobalBlock() { switch (randomInt(2)) { case 0: - return NoMasterBlockService.NO_MASTER_BLOCK_ALL; + return NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL; case 1: - return NoMasterBlockService.NO_MASTER_BLOCK_WRITES; + return NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; default: return GatewayService.STATE_NOT_RECOVERED_BLOCK; } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index 8f512ade7465f..8da6040b6f996 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -39,7 +39,7 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.client.Client; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; @@ -93,7 +93,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { logger.info("--> should be blocked, no cluster-manager..."); ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state logger.info("--> start second node, cluster should be formed"); @@ -109,9 +109,9 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + 
assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.nodes().getSize(), equalTo(2)); @@ -161,11 +161,11 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertBusy(() -> { ClusterState clusterState = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); }); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); // verify that both nodes are still in the cluster state but there is no cluster-manager assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.nodes().getMasterNode(), equalTo(null)); @@ -184,9 +184,9 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.nodes().getSize(), equalTo(2)); @@ -214,7 +214,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertBusy(() -> { ClusterState state1 = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state1.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); logger.info("--> starting the previous cluster-manager node again..."); @@ -232,9 +232,9 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - 
assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.nodes().getSize(), equalTo(2)); @@ -262,7 +262,7 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { assertBusy(() -> { for (Client client : clients()) { ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state1.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); } }); @@ -321,7 +321,7 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { // spin here to wait till the state is set assertBusy(() -> { ClusterState st = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(st.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(st.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); logger.info("--> start back the 2 nodes "); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java index 26852e59d1c86..0a4d9f8564b61 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java @@ -44,13 +44,13 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.action.index.MappingUpdatedAction; import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.plugins.Plugin; import org.opensearch.rest.RestStatus; import org.opensearch.script.Script; @@ -90,7 +90,7 @@ protected Collection> nodePlugins() { public void testNoClusterManagerActions() throws Exception { Settings settings = Settings.builder() .put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true) - .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all") + .put(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all") .build(); final TimeValue timeout = TimeValue.timeValueMillis(10); @@ -117,7 +117,7 @@ public void testNoClusterManagerActions() throws Exception { .execute() .actionGet() .getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); }); assertRequestBuilderThrows( @@ -233,9 +233,9 @@ void checkUpdateAction(boolean autoCreateIndex, TimeValue timeout, ActionRequest // we clean the metadata when loosing a cluster-manager, 
therefore all operations on indices will auto create it, if allowed try { builder.get(); - fail("expected ClusterBlockException or MasterNotDiscoveredException"); - } catch (ClusterBlockException | MasterNotDiscoveredException e) { - if (e instanceof MasterNotDiscoveredException) { + fail("expected ClusterBlockException or ClusterManagerNotDiscoveredException"); + } catch (ClusterBlockException | ClusterManagerNotDiscoveredException e) { + if (e instanceof ClusterManagerNotDiscoveredException) { assertTrue(autoCreateIndex); } else { assertFalse(autoCreateIndex); @@ -256,7 +256,7 @@ void checkWriteAction(ActionRequestBuilder builder) { public void testNoClusterManagerActionsWriteClusterManagerBlock() throws Exception { Settings settings = Settings.builder() .put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false) - .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write") + .put(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write") .build(); final List nodes = internalCluster().startNodes(3, settings); @@ -288,7 +288,7 @@ public void testNoClusterManagerActionsWriteClusterManagerBlock() throws Excepti assertBusy(() -> { ClusterState state = clientToClusterManagerlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); }); GetResponse getResponse = clientToClusterManagerlessNode.prepareGet("test1", "1").get(); @@ -340,7 +340,7 @@ public void testNoClusterManagerActionsWriteClusterManagerBlock() throws Excepti public void testNoClusterManagerActionsMetadataWriteClusterManagerBlock() throws Exception { Settings settings = Settings.builder() - .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write") + .put(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write") .put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), "100ms") .build(); @@ -387,7 +387,7 @@ public void testNoClusterManagerActionsMetadataWriteClusterManagerBlock() throws assertBusy(() -> { for (String node : nodesWithShards) { ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); } }); @@ -429,7 +429,7 @@ public void testNoClusterManagerActionsMetadataWriteClusterManagerBlock() throws // dynamic mapping updates fail expectThrows( - MasterNotDiscoveredException.class, + ClusterManagerNotDiscoveredException.class, () -> client(randomFrom(nodesWithShards)).prepareIndex("test1") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().field("new_field", "value").endObject()) @@ -439,7 +439,7 @@ public void testNoClusterManagerActionsMetadataWriteClusterManagerBlock() throws // dynamic index creation fails expectThrows( - MasterNotDiscoveredException.class, + ClusterManagerNotDiscoveredException.class, () -> client(randomFrom(nodesWithShards)).prepareIndex("test2") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java index 
2f81299d76db9..ebdff162ca4e2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java @@ -36,7 +36,7 @@ import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; @@ -73,7 +73,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { nullValue() ); fail("should not be able to find cluster-manager"); - } catch (MasterNotDiscoveredException e) { + } catch (ClusterManagerNotDiscoveredException e) { // all is well, no cluster-manager elected } logger.info("--> start cluster-manager node"); @@ -123,7 +123,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { nullValue() ); fail("should not be able to find cluster-manager"); - } catch (MasterNotDiscoveredException e) { + } catch (ClusterManagerNotDiscoveredException e) { // all is well, no cluster-manager elected } @@ -177,7 +177,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { nullValue() ); fail("should not be able to find cluster-manager"); - } catch (MasterNotDiscoveredException e) { + } catch (ClusterManagerNotDiscoveredException e) { // all is well, no cluster-manager elected } logger.info("--> start cluster-manager node (1)"); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index b0153a1306928..72e24b0396695 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -109,9 +109,15 @@ private MockTerminal executeCommand( } private MockTerminal unsafeBootstrap(Environment environment, boolean abort, Boolean applyClusterReadOnlyBlock) throws Exception { - final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort, applyClusterReadOnlyBlock); - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG)); + final MockTerminal terminal = executeCommand( + new UnsafeBootstrapClusterManagerCommand(), + environment, + 0, + abort, + applyClusterReadOnlyBlock + ); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapClusterManagerCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapClusterManagerCommand.CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG)); return terminal; } @@ -171,7 +177,7 @@ public void testBootstrapNotClusterManagerEligible() { final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build() ); - expectThrows(() -> unsafeBootstrap(environment), 
UnsafeBootstrapMasterCommand.NOT_CLUSTER_MANAGER_NODE_MSG); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.NOT_CLUSTER_MANAGER_NODE_MSG); } public void testBootstrapNoDataFolder() { @@ -214,7 +220,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { ); assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); }); Settings dataPathSettings = internalCluster().dataPathSettings(node); @@ -224,7 +230,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build() ); - expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG); } public void testBootstrapNoClusterState() throws IOException { @@ -332,14 +338,17 @@ public void test3ClusterManagerNodes2Failed() throws Exception { .execute() .actionGet() .getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); }); logger.info("--> try to unsafely bootstrap 1st cluster-manager-eligible node, while node lock is held"); Environment environmentClusterManager1 = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManager1DataPathSettings).build() ); - expectThrows(() -> unsafeBootstrap(environmentClusterManager1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + expectThrows( + () -> unsafeBootstrap(environmentClusterManager1), + UnsafeBootstrapClusterManagerCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG + ); logger.info("--> stop 1st cluster-manager-eligible node and data-only node"); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); @@ -356,7 +365,7 @@ public void test3ClusterManagerNodes2Failed() throws Exception { containsString( String.format( Locale.ROOT, - UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, + UnsafeBootstrapClusterManagerCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, metadata.coordinationMetadata().term(), metadata.version() ) @@ -385,8 +394,10 @@ public void test3ClusterManagerNodes2Failed() throws Exception { .execute() .actionGet() .getState(); - assertFalse(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); - assertTrue(state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); + assertFalse(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue( + state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapClusterManagerCommand.UNSAFE_BOOTSTRAP.getKey(), false) + ); }); List bootstrappedNodes = new ArrayList<>(); @@ -497,7 +508,7 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { ); ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - 
assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index d2f406a0c5d98..6c055c4015ff0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -37,13 +37,13 @@ import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.disruption.BlockMasterServiceOnMaster; +import org.opensearch.test.disruption.BlockClusterManagerServiceOnClusterManager; import org.opensearch.test.disruption.IntermittentLongGCDisruption; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -198,7 +198,10 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc * Verify that the proper block is applied when nodes lose their cluster-manager */ public void testVerifyApiBlocksDuringPartition() throws Exception { - internalCluster().startNodes(3, Settings.builder().putNull(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey()).build()); + internalCluster().startNodes( + 3, + Settings.builder().putNull(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey()).build() + ); // Makes sure that the get request can be executed on each node locally: assertAcked( @@ -227,7 +230,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. 
However // It may a take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES, TimeValue.timeValueSeconds(30)); logger.info("wait until elected cluster-manager has been removed and a new 2 node cluster was from (via [{}])", isolatedNode); ensureStableCluster(2, nonIsolatedNode); @@ -258,13 +261,13 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { logger.info( "Verify no cluster-manager block with {} set to {}", - NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all" ); client().admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all")) + .setTransientSettings(Settings.builder().put(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all")) .get(); networkDisruption.startDisrupting(); @@ -273,7 +276,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. However // It may a take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node // the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause @@ -305,7 +308,7 @@ public void testMappingTimeout() throws Exception { .setTransientSettings(Settings.builder().put("indices.mapping.dynamic_timeout", "1ms")) ); - ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random()); + ServiceDisruptionScheme disruption = new BlockClusterManagerServiceOnClusterManager(random()); setDisruptionScheme(disruption); disruption.startDisrupting(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 2eca8555e1388..a4afdc0dba85c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -94,7 +94,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.TestCustomMetadata; -import org.opensearch.test.disruption.BusyMasterServiceDisruption; +import org.opensearch.test.disruption.BusyClusterManagerServiceDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.transport.MockTransportService; @@ -1157,7 +1157,7 @@ public void testDataNodeRestartWithBusyClusterManagerDuringSnapshot() throws Exc final String 
dataNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - ServiceDisruptionScheme disruption = new BusyMasterServiceDisruption(random(), Priority.HIGH); + ServiceDisruptionScheme disruption = new BusyClusterManagerServiceDisruption(random(), Priority.HIGH); setDisruptionScheme(disruption); client(internalCluster().getMasterName()).admin() .cluster() diff --git a/server/src/main/java/org/opensearch/Assertions.java b/server/src/main/java/org/opensearch/Assertions.java index 843853ec8b20b..dd2fcf9481a3c 100644 --- a/server/src/main/java/org/opensearch/Assertions.java +++ b/server/src/main/java/org/opensearch/Assertions.java @@ -35,7 +35,7 @@ /** * Provides a static final field that can be used to check if assertions are enabled. Since this field might be used elsewhere to check if * assertions are enabled, if you are running with assertions enabled for specific packages or classes, you should enable assertions on this - * class too (e.g., {@code -ea org.opensearch.Assertions -ea org.opensearch.cluster.service.MasterService}). + * class too (e.g., {@code -ea org.opensearch.Assertions -ea org.opensearch.cluster.service.ClusterManagerService}). * * @opensearch.internal */ diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 2de25aa1cd6dc..4c12d031a028f 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -33,6 +33,7 @@ package org.opensearch; import org.opensearch.action.support.replication.ReplicationOperation; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; @@ -46,6 +47,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParseException; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; @@ -789,8 +791,8 @@ private enum OpenSearchExceptionHandle { UNKNOWN_VERSION_ADDED ), CLUSTER_MANAGER_NOT_DISCOVERED_EXCEPTION( - org.opensearch.discovery.MasterNotDiscoveredException.class, - org.opensearch.discovery.MasterNotDiscoveredException::new, + ClusterManagerNotDiscoveredException.class, + ClusterManagerNotDiscoveredException::new, 3, UNKNOWN_VERSION_ADDED ), @@ -1499,12 +1501,7 @@ private enum OpenSearchExceptionHandle { 143, UNKNOWN_VERSION_ADDED ), - NOT_CLUSTER_MANAGER_EXCEPTION( - org.opensearch.cluster.NotMasterException.class, - org.opensearch.cluster.NotMasterException::new, - 144, - UNKNOWN_VERSION_ADDED - ), + NOT_CLUSTER_MANAGER_EXCEPTION(NotClusterManagerException.class, NotClusterManagerException::new, 144, UNKNOWN_VERSION_ADDED), STATUS_EXCEPTION( org.opensearch.OpenSearchStatusException.class, org.opensearch.OpenSearchStatusException::new, diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 790f8f6cbdc36..2a3c82991d9bb 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -368,7 +368,7 @@ import org.opensearch.rest.action.cat.RestFielddataAction; import org.opensearch.rest.action.cat.RestHealthAction; import 
org.opensearch.rest.action.cat.RestIndicesAction; -import org.opensearch.rest.action.cat.RestMasterAction; +import org.opensearch.rest.action.cat.RestClusterManagerAction; import org.opensearch.rest.action.cat.RestNodeAttrsAction; import org.opensearch.rest.action.cat.RestNodesAction; import org.opensearch.rest.action.cat.RestPluginsAction; @@ -809,7 +809,7 @@ public void initRestHandlers(Supplier nodesInCluster) { // CAT API registerHandler.accept(new RestAllocationAction()); registerHandler.accept(new RestShardsAction()); - registerHandler.accept(new RestMasterAction()); + registerHandler.accept(new RestClusterManagerAction()); registerHandler.accept(new RestNodesAction()); registerHandler.accept(new RestTasksAction(nodesInCluster)); registerHandler.accept(new RestIndicesAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 6120317dfeace..8a2ad77ac1693 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -44,7 +44,7 @@ import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.LocalClusterUpdateTask; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -226,8 +226,9 @@ public void onNoLongerMaster(String source) { "stopped being cluster-manager while waiting for events with priority [{}]. retrying.", request.waitForEvents() ); - // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException - listener.onFailure(new NotMasterException("no longer cluster-manager. source: [" + source + "]")); + // TransportClusterManagerNodeAction implements the retry logic, which is triggered by passing a + // NotClusterManagerException + listener.onFailure(new NotClusterManagerException("no longer cluster-manager. 
source: [" + source + "]")); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 673153c40bf46..b2e09629fc501 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -40,7 +40,7 @@ import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -142,7 +142,7 @@ public void onNewClusterState(ClusterState newState) { ActionListener.completeWith(listener, () -> buildResponse(request, newState)); } else { listener.onFailure( - new NotMasterException( + new NotClusterManagerException( "cluster-manager stepped down waiting for metadata version " + request.waitForMetadataVersion() ) ); diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 46c93a8d33c7b..05b2f5eb168d8 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -43,8 +43,8 @@ import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; -import org.opensearch.cluster.MasterNodeChangePredicate; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.ClusterManagerNodeChangePredicate; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -54,7 +54,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.unit.TimeValue; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -186,7 +186,7 @@ protected void doStart(ClusterState clusterState) { } } else { ActionListener delegate = ActionListener.delegateResponse(listener, (delegatedListener, t) -> { - if (t instanceof FailedToCommitClusterStateException || t instanceof NotMasterException) { + if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { logger.debug( () -> new ParameterizedMessage( "master could not publish cluster state or " @@ -243,7 +243,7 @@ public void handleException(final TransportException exp) { } private void retryOnMasterChange(ClusterState state, Throwable failure) { - retry(state, failure, MasterNodeChangePredicate.build(state)); + 
retry(state, failure, ClusterManagerNodeChangePredicate.build(state)); } private void retry(ClusterState state, final Throwable failure, final Predicate statePredicate) { @@ -252,7 +252,7 @@ private void retry(ClusterState state, final Throwable failure, final Predicate< - startTime); if (remainingTimeoutMS <= 0) { logger.debug(() -> new ParameterizedMessage("timed out before retrying [{}] after failure", actionName), failure); - listener.onFailure(new MasterNotDiscoveredException(failure)); + listener.onFailure(new ClusterManagerNotDiscoveredException(failure)); return; } this.observer = new ClusterStateObserver( @@ -280,7 +280,7 @@ public void onTimeout(TimeValue timeout) { () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure ); - listener.onFailure(new MasterNotDiscoveredException(failure)); + listener.onFailure(new ClusterManagerNotDiscoveredException(failure)); } }, statePredicate); } diff --git a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java similarity index 95% rename from server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java rename to server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java index 7ff5161c443a1..ecb99e06f3ef0 100644 --- a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java @@ -41,9 +41,9 @@ * * @opensearch.internal */ -public final class MasterNodeChangePredicate { +public final class ClusterManagerNodeChangePredicate { - private MasterNodeChangePredicate() { + private ClusterManagerNodeChangePredicate() { } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 91137a7efae92..6a12e0cd5ed46 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -31,7 +31,7 @@ package org.opensearch.cluster; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import java.util.List; @@ -49,10 +49,10 @@ public interface ClusterStateTaskListener { /** * called when the task was rejected because the local node is no longer cluster-manager. - * Used only for tasks submitted to {@link MasterService}. + * Used only for tasks submitted to {@link ClusterManagerService}. */ default void onNoLongerMaster(String source) { - onFailure(source, new NotMasterException("no longer cluster-manager. source: [" + source + "]")); + onFailure(source, new NotClusterManagerException("no longer cluster-manager. 
source: [" + source + "]")); } /** diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java similarity index 96% rename from server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java rename to server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index bec2674f5d549..42a6438ff125e 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -37,7 +37,7 @@ * * @opensearch.internal */ -public interface LocalNodeMasterListener extends ClusterStateListener { +public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** * Called when local node is elected to be the cluster-manager diff --git a/server/src/main/java/org/opensearch/cluster/NotMasterException.java b/server/src/main/java/org/opensearch/cluster/NotClusterManagerException.java similarity index 89% rename from server/src/main/java/org/opensearch/cluster/NotMasterException.java rename to server/src/main/java/org/opensearch/cluster/NotClusterManagerException.java index a855f3b665ac3..522bad0f9d682 100644 --- a/server/src/main/java/org/opensearch/cluster/NotMasterException.java +++ b/server/src/main/java/org/opensearch/cluster/NotClusterManagerException.java @@ -43,13 +43,13 @@ * * @opensearch.internal */ -public class NotMasterException extends OpenSearchException { +public class NotClusterManagerException extends OpenSearchException { - public NotMasterException(String msg) { + public NotClusterManagerException(String msg) { super(msg); } - public NotMasterException(StreamInput in) throws IOException { + public NotClusterManagerException(StreamInput in) throws IOException { super(in); } diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index 93ddd74322ce9..81a365df9866d 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -44,8 +44,8 @@ import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskListener; -import org.opensearch.cluster.MasterNodeChangePredicate; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.ClusterManagerNodeChangePredicate; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -183,7 +183,7 @@ private void sendShardAction( ) { ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); - Predicate changePredicate = MasterNodeChangePredicate.build(currentState); + Predicate changePredicate = ClusterManagerNodeChangePredicate.build(currentState); if (clusterManagerNode == null) { logger.warn("no cluster-manager known for action [{}] for shard entry [{}]", actionName, request); waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); @@ -223,7 +223,7 @@ public void 
handleException(TransportException exp) { } private static Class[] CLUSTER_MANAGER_CHANNEL_EXCEPTIONS = new Class[] { - NotMasterException.class, + NotClusterManagerException.class, ConnectTransportException.class, FailedToCommitClusterStateException.class }; @@ -388,7 +388,7 @@ public void onFailure(String source, Exception e) { public void onNoLongerMaster(String source) { logger.error("{} no longer cluster-manager while failing shard [{}]", request.shardId, request); try { - channel.sendResponse(new NotMasterException(source)); + channel.sendResponse(new NotClusterManagerException(source)); } catch (Exception channelException) { logger.warn( () -> new ParameterizedMessage( @@ -817,7 +817,7 @@ public ClusterTasksResult execute(ClusterState currentState, @Override public void onFailure(String source, Exception e) { - if (e instanceof FailedToCommitClusterStateException || e instanceof NotMasterException) { + if (e instanceof FailedToCommitClusterStateException || e instanceof NotClusterManagerException) { logger.debug(() -> new ParameterizedMessage("failure during [{}]", source), e); } else { logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 5a4f9be40600d..ee71307e82ec7 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -58,7 +58,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -105,7 +105,7 @@ import java.util.stream.Stream; import java.util.stream.StreamSupport; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ID; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ID; import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -141,12 +141,12 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final boolean singleNodeDiscovery; private final ElectionStrategy electionStrategy; private final TransportService transportService; - private final MasterService clusterManagerService; + private final ClusterManagerService clusterManagerService; private final AllocationService allocationService; private final JoinHelper joinHelper; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final Supplier<CoordinationState.PersistedState> persistedStateSupplier; - private final NoMasterBlockService noClusterManagerBlockService; + private final NoClusterManagerBlockService noClusterManagerBlockService; final Object mutex = new Object(); // package-private to allow tests to call methods that assert that the mutex is held private final SetOnce<CoordinationState> coordinationState = new SetOnce<>(); // initialized on start-up (see doStart) private volatile ClusterState applierState; // the state that should be exposed to the cluster state
applier @@ -191,7 +191,7 @@ public Coordinator( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, - MasterService clusterManagerService, + ClusterManagerService clusterManagerService, Supplier persistedStateSupplier, SeedHostsProvider seedHostsProvider, ClusterApplier clusterApplier, @@ -222,7 +222,7 @@ public Coordinator( nodeHealthService ); this.persistedStateSupplier = persistedStateSupplier; - this.noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings); + this.noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); this.lastKnownLeader = Optional.empty(); this.lastJoin = Optional.empty(); this.joinAccumulator = new InitialJoinAccumulator(); @@ -911,7 +911,7 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) final boolean activePublication = currentPublication.map(CoordinatorPublication::isActiveForCurrentLeader).orElse(false); if (becomingClusterManager && activePublication == false) { - // cluster state update task to become cluster-manager is submitted to MasterService, + // cluster state update task to become cluster-manager is submitted to ClusterManagerService, // but publication has not started yet assert followersChecker.getKnownFollowers().isEmpty() : followersChecker.getKnownFollowers(); } else { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 8ec215719b642..aeff856bef51a 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -40,13 +40,13 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.ClusterStateTaskListener; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Priority; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.StreamInput; @@ -106,7 +106,7 @@ public class JoinHelper { Setting.Property.Deprecated ); - private final MasterService masterService; + private final ClusterManagerService clusterManagerService; private final TransportService transportService; private volatile JoinTaskExecutor joinTaskExecutor; @@ -122,7 +122,7 @@ public class JoinHelper { JoinHelper( Settings settings, AllocationService allocationService, - MasterService masterService, + ClusterManagerService clusterManagerService, TransportService transportService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, @@ -132,7 +132,7 @@ public class JoinHelper { RerouteService rerouteService, NodeHealthService nodeHealthService ) { - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); @@ -143,13 +143,14 @@ public class JoinHelper { @Override public 
ClusterTasksResult execute(ClusterState currentState, List joiningTasks) throws Exception { - // The current state that MasterService uses might have been updated by a (different) cluster-manager in a higher term + // The current state that ClusterManagerService uses might have been updated by a (different) cluster-manager in a higher + // term // already // Stop processing the current cluster state update, as there's no point in continuing to compute it as // it will later be rejected by Coordinator.publish(...) anyhow if (currentState.term() > term) { logger.trace("encountered higher term {} than current {}, there is a newer cluster-manager", currentState.term(), term); - throw new NotMasterException( + throw new NotClusterManagerException( "Higher term encountered (current: " + currentState.term() + " > used: " @@ -284,7 +285,7 @@ static Level getLogLevel(TransportException e) { Throwable cause = e.unwrapCause(); if (cause instanceof CoordinationStateRejectedException || cause instanceof FailedToCommitClusterStateException - || cause instanceof NotMasterException) { + || cause instanceof NotClusterManagerException) { return Level.DEBUG; } return Level.INFO; @@ -454,7 +455,7 @@ class LeaderJoinAccumulator implements JoinAccumulator { public void handleJoinRequest(DiscoveryNode sender, JoinCallback joinCallback) { final JoinTaskExecutor.Task task = new JoinTaskExecutor.Task(sender, "join existing leader"); assert joinTaskExecutor != null; - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( "node-join", task, ClusterStateTaskConfig.build(Priority.URGENT), @@ -539,7 +540,7 @@ public void close(Mode newMode) { pendingAsTasks.put(JoinTaskExecutor.newBecomeMasterTask(), (source, e) -> {}); pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {}); joinTaskExecutor = joinTaskExecutorGenerator.get(); - masterService.submitStateUpdateTasks( + clusterManagerService.submitStateUpdateTasks( stateUpdateSource, pendingAsTasks, ClusterStateTaskConfig.build(Priority.URGENT), diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 766dd2d11b03e..629a016e0eab1 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -37,7 +37,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -156,7 +156,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo "processing node joins, but we are not the cluster-manager. 
current cluster-manager: {}", currentNodes.getMasterNode() ); - throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not cluster-manager for join request"); + throw new NotClusterManagerException("Node [" + currentNodes.getLocalNode() + "] not cluster-manager for join request"); } else { newState = ClusterState.builder(currentState); } @@ -278,7 +278,9 @@ protected ClusterState.Builder becomeClusterManagerAndTrimConflictingNodes(Clust // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) - .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) + .blocks( + ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID) + ) .build(); logger.trace("becomeClusterManagerAndTrimConflictingNodes: {}", tmpState.nodes()); allocationService.cleanCaches(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java similarity index 93% rename from server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java rename to server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java index c861456d66330..44ced46048736 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java @@ -46,7 +46,7 @@ * * @opensearch.internal */ -public class NoMasterBlockService { +public class NoClusterManagerBlockService { public static final int NO_MASTER_BLOCK_ID = 2; public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock( NO_MASTER_BLOCK_ID, @@ -79,7 +79,7 @@ public class NoMasterBlockService { public static final Setting<ClusterBlock> NO_MASTER_BLOCK_SETTING = new Setting<>( "cluster.no_master_block", "metadata_write", - NoMasterBlockService::parseNoClusterManagerBlock, + NoClusterManagerBlockService::parseNoClusterManagerBlock, Property.Dynamic, Property.NodeScope, Property.Deprecated @@ -89,14 +89,14 @@ public class NoMasterBlockService { public static final Setting<ClusterBlock> NO_CLUSTER_MANAGER_BLOCK_SETTING = new Setting<>( "cluster.no_cluster_manager_block", NO_MASTER_BLOCK_SETTING, - NoMasterBlockService::parseNoClusterManagerBlock, + NoClusterManagerBlockService::parseNoClusterManagerBlock, Property.Dynamic, Property.NodeScope ); private volatile ClusterBlock noClusterManagerBlock; - public NoMasterBlockService(Settings settings, ClusterSettings clusterSettings) { + public NoClusterManagerBlockService(Settings settings, ClusterSettings clusterSettings) { this.noClusterManagerBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(NO_CLUSTER_MANAGER_BLOCK_SETTING, this::setNoMasterBlock); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java index 3db2171b4cca0..7d51002150fb9 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java @@ -39,7 +39,7 @@ // NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging initialization // after LoggingAwareCommand instance is constructed.
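// [Editorial illustration, not part of this patch] The operator-facing CLI is untouched by the rename below: the subcommand key stays "unsafe-bootstrap"; only the implementing class name changes. A minimal sketch of equivalent wiring, assuming a hypothetical MultiCommand subclass next to the real NodeToolCli:
//
//     public class ExampleNodeToolCli extends MultiCommand {
//         public ExampleNodeToolCli() {
//             super("example node tool", () -> {});
//             // same CLI key as before the rename, new class behind it
//             subcommands.put("unsafe-bootstrap", new UnsafeBootstrapClusterManagerCommand());
//         }
//     }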
-// It's too late for us, because before UnsafeBootstrapMasterCommand is added to the list of subcommands +// It's too late for us, because before UnsafeBootstrapClusterManagerCommand is added to the list of subcommands // log4j2 initialization will happen, because it has static reference to Logger class. // Even if we avoid making a static reference to Logger class, there is no nice way to avoid declaring // UNSAFE_BOOTSTRAP, which depends on ClusterService, which in turn has static Logger. @@ -56,7 +56,7 @@ public NodeToolCli() { super("A CLI tool to do unsafe cluster and index manipulations on current node", () -> {}); CommandLoggingConfigurator.configureLoggingWithoutConfig(); subcommands.put("repurpose", new NodeRepurposeCommand()); - subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); + subcommands.put("unsafe-bootstrap", new UnsafeBootstrapClusterManagerCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); subcommands.put("override-version", new OverrideNodeVersionCommand()); subcommands.put("remove-settings", new RemoveSettingsCommand()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java similarity index 98% rename from server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java rename to server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java index 1f17844adf4fe..8506a59b22763 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java @@ -59,7 +59,7 @@ * * @opensearch.internal */ -public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { +public class UnsafeBootstrapClusterManagerCommand extends OpenSearchNodeCommand { static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = "Current node cluster state (term, version) pair is (%s, %s)"; static final String CONFIRMATION_MSG = DELIMITER @@ -85,7 +85,7 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand { private OptionSpec applyClusterReadOnlyBlockOption; - UnsafeBootstrapMasterCommand() { + UnsafeBootstrapClusterManagerCommand() { super( "Forces the successful election of the current node after the permanent loss of the half or more cluster-manager-eligible nodes" ); diff --git a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java index a3fa683acba6f..0738254823964 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java @@ -39,7 +39,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -50,7 +50,7 @@ /** * A {@link BatchedRerouteService} is a {@link RerouteService} that batches together reroute requests to avoid unnecessary extra reroutes. - * This component only does meaningful work on the elected cluster-manager node. 
Reroute requests will fail with a {@link NotMasterException} on + * This component only does meaningful work on the elected cluster-manager node. Reroute requests will fail with a {@link NotClusterManagerException} on * other nodes. * * @opensearch.internal @@ -147,7 +147,10 @@ public void onNoLongerMaster(String source) { pendingRerouteListeners = null; } } - ActionListener.onFailure(currentListeners, new NotMasterException("delayed reroute [" + reason + "] cancelled")); + ActionListener.onFailure( + currentListeners, + new NotClusterManagerException("delayed reroute [" + reason + "] cancelled") + ); // no big deal, the new cluster-manager will reroute again } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 76ac93d7d8d85..bc1397989dc96 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateTaskConfig; -import org.opensearch.cluster.LocalNodeMasterListener; +import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.TimeoutClusterStateListener; import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; @@ -277,7 +277,7 @@ public void removeTimeoutListener(TimeoutClusterStateListener listener) { /** * Add a listener for on/off local node cluster-manager events */ - public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { + public void addLocalNodeMasterListener(LocalNodeClusterManagerListener listener) { addListener(listener); } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java similarity index 99% rename from server/src/main/java/org/opensearch/cluster/service/MasterService.java rename to server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java index 0f9106f6de0c9..df691c1443a56 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java @@ -88,8 +88,8 @@ * * @opensearch.internal */ -public class MasterService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(MasterService.class); +public class ClusterManagerService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(ClusterManagerService.class); public static final Setting MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( "cluster.service.slow_master_task_logging_threshold", @@ -122,7 +122,7 @@ public class MasterService extends AbstractLifecycleComponent { private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor; private volatile Batcher taskBatcher; - public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); this.slowTaskLoggingThreshold = 
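// [Editorial illustration, not part of this patch] The slow-task threshold now has two registered keys (see the ClusterSettings hunk further below): the deprecated "cluster.service.slow_master_task_logging_threshold" and a renamed successor. Assuming the successor follows the same fallback pattern this patch uses for "cluster.no_cluster_manager_block", a node configured with only the old key keeps working and logs a deprecation warning:
//
//     Settings settings = Settings.builder()
//         .put("cluster.service.slow_master_task_logging_threshold", "20s") // deprecated key
//         .build();
//     // the ClusterManagerService threshold resolves to 20s through the fallback setting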
CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); @@ -825,7 +825,7 @@ private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterSt if (previousClusterState != clusterTasksResult.resultingState && previousClusterState.nodes().isLocalNodeElectedMaster() && (clusterTasksResult.resultingState.nodes().isLocalNodeElectedMaster() == false)) { - throw new AssertionError("update task submitted to MasterService cannot remove master"); + throw new AssertionError("update task submitted to ClusterManagerService cannot remove cluster-manager"); } } catch (Exception e) { logger.trace( diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 3d643c96e65dc..7fd8ed62def07 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -39,7 +39,7 @@ import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskListener; -import org.opensearch.cluster.LocalNodeMasterListener; +import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; @@ -62,7 +62,7 @@ * @opensearch.internal */ public class ClusterService extends AbstractLifecycleComponent { - private final MasterService masterService; + private final ClusterManagerService clusterManagerService; private final ClusterApplierService clusterApplierService; @@ -92,7 +92,7 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread this( settings, clusterSettings, - new MasterService(settings, clusterSettings, threadPool), + new ClusterManagerService(settings, clusterSettings, threadPool), new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool) ); } @@ -100,12 +100,12 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread public ClusterService( Settings settings, ClusterSettings clusterSettings, - MasterService masterService, + ClusterManagerService clusterManagerService, ClusterApplierService clusterApplierService ) { this.settings = settings; this.nodeName = Node.NODE_NAME_SETTING.get(settings); - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.operationRouting = new OperationRouting(settings, clusterSettings); this.clusterSettings = clusterSettings; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -131,18 +131,18 @@ public RerouteService getRerouteService() { @Override protected synchronized void doStart() { clusterApplierService.start(); - masterService.start(); + clusterManagerService.start(); } @Override protected synchronized void doStop() { - masterService.stop(); + clusterManagerService.stop(); clusterApplierService.stop(); } @Override protected synchronized void doClose() { - masterService.close(); + clusterManagerService.close(); clusterApplierService.close(); } @@ -214,12 +214,12 @@ public void removeListener(ClusterStateListener listener) { /** * Add a listener for on/off local node cluster-manager events */ - public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { + public void addLocalNodeMasterListener(LocalNodeClusterManagerListener listener) { 
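// [Editorial illustration, not part of this patch] Callers of this API only change the interface name; the registration call is unchanged. A minimal sketch (the callback names onMaster() and offMaster() are assumed to carry over, since this PR renames classes, not methods):
//
//     clusterService.addLocalNodeMasterListener(new LocalNodeClusterManagerListener() {
//         @Override
//         public void onMaster() {
//             // start work that must run only on the elected cluster-manager
//         }
//
//         @Override
//         public void offMaster() {
//             // stop that work once this node is no longer the cluster-manager
//         }
//     });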
clusterApplierService.addLocalNodeMasterListener(listener); } - public MasterService getMasterService() { - return masterService; + public ClusterManagerService getMasterService() { + return clusterManagerService; } /** @@ -242,7 +242,7 @@ public ClusterApplierService getClusterApplierService() { public static boolean assertClusterOrMasterStateThread() { assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) - || Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME) + || Thread.currentThread().getName().contains(ClusterManagerService.MASTER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread"; return true; } @@ -333,6 +333,6 @@ public void submitStateUpdateTasks( final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor ) { - masterService.submitStateUpdateTasks(source, tasks, config, executor); + clusterManagerService.submitStateUpdateTasks(source, tasks, config, executor); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 9ba56dfa6456f..4451d11a7b976 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -62,7 +62,7 @@ import org.opensearch.cluster.coordination.JoinHelper; import org.opensearch.cluster.coordination.LagDetector; import org.opensearch.cluster.coordination.LeaderChecker; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.coordination.Reconfigurator; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.Metadata; @@ -81,7 +81,7 @@ import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.logging.Loggers; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkService; @@ -273,8 +273,8 @@ public void apply(Settings value, Settings current, Settings previous) { InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, InternalSnapshotsInfoService.INTERNAL_SNAPSHOT_INFO_MAX_CONCURRENT_FETCHES_SETTING, DestructiveOperations.REQUIRES_NAME_SETTING, - NoMasterBlockService.NO_MASTER_BLOCK_SETTING, // deprecated - NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING, + NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING, // deprecated + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING, GatewayService.EXPECTED_DATA_NODES_SETTING, GatewayService.EXPECTED_MASTER_NODES_SETTING, GatewayService.EXPECTED_NODES_SETTING, @@ -334,8 +334,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndexModule.NODE_STORE_ALLOW_MMAP, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated + 
ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index 3be1c4b080b5f..062d62956d377 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.LocalNodeMasterListener; +import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -88,10 +88,10 @@ public ConsistentSettingsService(Settings settings, ClusterService clusterServic } /** - * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are + * Returns a {@link LocalNodeClusterManagerListener} that will publish hashes of all the settings passed in the constructor. These hashes are * published by the cluster-manager node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. */ - public LocalNodeMasterListener newHashPublisher() { + public LocalNodeClusterManagerListener newHashPublisher() { // eagerly compute hashes to be published final Map computedHashesOfConsistentSettings = computeHashesOfConsistentSecureSettings(); return new HashesPublisher(computedHashesOfConsistentSettings, clusterService); @@ -246,7 +246,7 @@ private byte[] computeSaltedPBKDF2Hash(byte[] bytes, byte[] salt) { } } - static final class HashesPublisher implements LocalNodeMasterListener { + static final class HashesPublisher implements LocalNodeClusterManagerListener { // eagerly compute hashes to be published final Map computedHashesOfConsistentSettings; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java index fef37299b349d..e4f8e1a221a0d 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java @@ -33,7 +33,7 @@ package org.opensearch.common.util.concurrent; import org.opensearch.cluster.service.ClusterApplierService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transports; @@ -109,7 +109,7 @@ protected boolean blockingAllowed() { return Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) && ClusterApplierService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON) - && MasterService.assertNotMasterUpdateThread(BLOCKING_OP_REASON); + && ClusterManagerService.assertNotMasterUpdateThread(BLOCKING_OP_REASON); } @Override diff --git a/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java 
b/server/src/main/java/org/opensearch/discovery/ClusterManagerNotDiscoveredException.java similarity index 82% rename from server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java rename to server/src/main/java/org/opensearch/discovery/ClusterManagerNotDiscoveredException.java index dec50388b5624..1f2db95ed4203 100644 --- a/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java +++ b/server/src/main/java/org/opensearch/discovery/ClusterManagerNotDiscoveredException.java @@ -43,17 +43,17 @@ * * @opensearch.internal */ -public class MasterNotDiscoveredException extends OpenSearchException { +public class ClusterManagerNotDiscoveredException extends OpenSearchException { - public MasterNotDiscoveredException() { + public ClusterManagerNotDiscoveredException() { super(""); } - public MasterNotDiscoveredException(Throwable cause) { + public ClusterManagerNotDiscoveredException(Throwable cause) { super(cause); } - public MasterNotDiscoveredException(String message) { + public ClusterManagerNotDiscoveredException(String message) { super(message); } @@ -62,7 +62,7 @@ public RestStatus status() { return RestStatus.SERVICE_UNAVAILABLE; } - public MasterNotDiscoveredException(StreamInput in) throws IOException { + public ClusterManagerNotDiscoveredException(StreamInput in) throws IOException { super(in); } } diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index c12283df1dbc9..44f44fa055b2b 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplier; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Randomness; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; @@ -121,7 +121,7 @@ public DiscoveryModule( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, - MasterService clusterManagerService, + ClusterManagerService clusterManagerService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index fe0eacc244f32..98b3c104aadbf 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -53,7 +53,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; @@ -110,7 +110,7 @@ public void allocateDangled(Collection indices, ActionListener replacedRoutes() { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java 
b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index dcedbc42aa6f5..c87bf33727524 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -56,7 +56,7 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.RepositoryCleanupInProgress; import org.opensearch.cluster.RestoreInProgress; import org.opensearch.cluster.SnapshotDeletionsInProgress; @@ -1884,7 +1884,7 @@ private List<ActionListener<Tuple<RepositoryData, SnapshotInfo>>> endAndGetListe */ private void handleFinalizationFailure(Exception e, SnapshotsInProgress.Entry entry, RepositoryData repositoryData) { Snapshot snapshot = entry.snapshot(); - if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { + if (ExceptionsHelper.unwrap(e, NotClusterManagerException.class, FailedToCommitClusterStateException.class) != null) { // Failure due to not being cluster-manager any more, don't try to remove snapshot from cluster state the next cluster-manager // will try ending this snapshot again logger.debug(() -> new ParameterizedMessage("[{}] failed to update cluster state during snapshot finalization", snapshot), e); @@ -2094,7 +2094,7 @@ public void onFailure(String source, Exception e) { public void onNoLongerMaster(String source) { failure.addSuppressed(new SnapshotException(snapshot, "no longer cluster-manager")); failSnapshotCompletionListeners(snapshot, failure); - failAllListenersOnMasterFailOver(new NotMasterException(source)); + failAllListenersOnMasterFailOver(new NotClusterManagerException(source)); if (listener != null) { listener.onNoLongerMaster(); } @@ -2306,7 +2306,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS createDeleteStateUpdate(outstandingDeletes, repoName, result.v1(), Priority.IMMEDIATE, listener) ); }, e -> { - if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { + if (ExceptionsHelper.unwrap(e, NotClusterManagerException.class, FailedToCommitClusterStateException.class) != null) { logger.warn("cluster-manager failover before deleted snapshot could complete", e); // Just pass the exception to the transport handler as is so it is retried on the new cluster-manager listener.onFailure(e); @@ -2788,7 +2788,7 @@ protected void handleListeners(List<ActionListener<Void>> deleteListeners) { private void failAllListenersOnMasterFailOver(Exception e) { logger.debug("Failing all snapshot operation listeners because this node is not cluster-manager any longer", e); synchronized (currentlyFinalizing) { - if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { + if (ExceptionsHelper.unwrap(e, NotClusterManagerException.class, FailedToCommitClusterStateException.class) != null) { repositoryOperations.clear(); for (Snapshot snapshot : new HashSet<>(snapshotCompletionListeners.keySet())) { failSnapshotCompletionListeners(snapshot, new SnapshotException(snapshot, "no longer cluster-manager")); diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 2046834fa9585..2af68bbedb456 100644 ---
a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -44,10 +44,11 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.client.AbstractClientHeadersTestCase; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.CoordinationStateRejectedException; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.IllegalShardRoutingStateException; import org.opensearch.cluster.routing.ShardRouting; @@ -69,6 +70,7 @@ import org.opensearch.common.util.CancellableThreadsTests; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentLocation; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.Index; import org.opensearch.index.engine.RecoveryEngineException; @@ -511,9 +513,9 @@ public void testFailedNodeException() throws IOException { } public void testClusterBlockException() throws IOException { - ClusterBlockException ex = serialize(new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES))); + ClusterBlockException ex = serialize(new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES))); assertEquals("blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];", ex.getMessage()); - assertTrue(ex.blocks().contains(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)); + assertTrue(ex.blocks().contains(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)); assertEquals(1, ex.blocks().size()); } @@ -695,7 +697,7 @@ public void testIds() { ids.put(0, org.opensearch.index.snapshots.IndexShardSnapshotFailedException.class); ids.put(1, org.opensearch.search.dfs.DfsPhaseExecutionException.class); ids.put(2, org.opensearch.common.util.CancellableThreads.ExecutionCancelledException.class); - ids.put(3, org.opensearch.discovery.MasterNotDiscoveredException.class); + ids.put(3, ClusterManagerNotDiscoveredException.class); ids.put(4, org.opensearch.OpenSearchSecurityException.class); ids.put(5, org.opensearch.index.snapshots.IndexShardRestoreException.class); ids.put(6, org.opensearch.indices.IndexClosedException.class); @@ -833,7 +835,7 @@ public void testIds() { ids.put(141, org.opensearch.index.query.QueryShardException.class); ids.put(142, ShardStateAction.NoLongerPrimaryShardException.class); ids.put(143, org.opensearch.script.ScriptException.class); - ids.put(144, org.opensearch.cluster.NotMasterException.class); + ids.put(144, NotClusterManagerException.class); ids.put(145, org.opensearch.OpenSearchStatusException.class); ids.put(146, org.opensearch.tasks.TaskCancelledException.class); ids.put(147, org.opensearch.env.ShardLockObtainFailedException.class); diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index 31c2d77370941..ca2940c742ec0 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -41,7 +41,7 @@ 
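// [Editorial illustration, not part of this patch] The renames keep the wire format stable: in the ids map below, id 3 still maps to the exception formerly named MasterNotDiscoveredException and id 144 to the one formerly named NotMasterException, so mixed-version clusters continue to interoperate. A sketch of the round trip this test class exercises, assuming its serialize(...) helper:
//
//     NotClusterManagerException before = new NotClusterManagerException("no longer cluster-manager");
//     NotClusterManagerException after = serialize(before); // written and re-read under wire id 144
//     assertEquals(before.getMessage(), after.getMessage());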
import org.opensearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.opensearch.client.transport.NoNodeAvailableException; import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.ParsingException; import org.opensearch.common.Strings; @@ -478,7 +478,7 @@ public void testToXContentWithHeadersAndMetadata() throws IOException { "foo", new OpenSearchException( "bar", - new OpenSearchException("baz", new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES))) + new OpenSearchException("baz", new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES))) ) ); e.addHeader("foo_0", "0"); @@ -1032,7 +1032,7 @@ public static Tuple randomExceptions() { int type = randomIntBetween(0, 5); switch (type) { case 0: - actual = new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)); + actual = new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)); expected = new OpenSearchException( "OpenSearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];]" diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 155e75653bbf9..976ce5cfee63e 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -69,7 +69,7 @@ import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; -import org.opensearch.rest.action.cat.RestMasterAction; +import org.opensearch.rest.action.cat.RestClusterManagerAction; import org.opensearch.rest.action.cat.RestShardsAction; import org.opensearch.rest.action.cat.RestPluginsAction; import org.opensearch.rest.action.cat.RestNodeAttrsAction; @@ -161,7 +161,7 @@ public void testCatIndices() { } public void testCatClusterManager() { - RestMasterAction action = new RestMasterAction(); + RestClusterManagerAction action = new RestClusterManagerAction(); Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index a544bad4cd9e6..91d780e218e71 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -38,7 +38,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; import 
org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -116,7 +116,7 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { ClusterState.builder(clusterService.state()) .blocks( ClusterBlocks.builder() - .addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ALL) + .addGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL) .addIndexBlock(indexName, IndexMetadata.INDEX_WRITE_BLOCK) ) ); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index a5828edd65c16..acfdf7982e9d0 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -42,7 +42,7 @@ import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -58,7 +58,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.discovery.MasterNotDiscoveredException; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; import org.opensearch.rest.RestStatus; import org.opensearch.tasks.Task; @@ -314,7 +314,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) listener.get(); fail("Expected exception but returned proper result"); } catch (ExecutionException ex) { - assertThat(ex.getCause(), instanceOf(MasterNotDiscoveredException.class)); + assertThat(ex.getCause(), instanceOf(ClusterManagerNotDiscoveredException.class)); assertThat(ex.getCause().getCause(), instanceOf(ClusterBlockException.class)); } } else { @@ -382,7 +382,7 @@ public void testClusterManagerNotAvailable() throws ExecutionException, Interrup PlainActionFuture listener = new PlainActionFuture<>(); new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertTrue(listener.isDone()); - assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class); + assertListenerThrows("ClusterManagerNotDiscoveredException should be thrown", listener, ClusterManagerNotDiscoveredException.class); } public void testClusterManagerBecomesAvailable() throws ExecutionException, InterruptedException { @@ -517,7 +517,7 @@ protected void masterOperation(Request request, ClusterState state, ActionListen setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); Exception failure = randomBoolean() ? 
new FailedToCommitClusterStateException("Fake error") - : new NotMasterException("Fake error"); + : new NotClusterManagerException("Fake error"); listener.onFailure(failure); } }.execute(request, listener); diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 251703a933525..6005ca9150488 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -42,14 +42,14 @@ import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.cluster.coordination.MockSinglePrioritizingExecutor; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; @@ -87,7 +87,7 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { } }; - final MasterService clusterManagerService = new FakeThreadPoolMasterService( + final ClusterManagerService clusterManagerService = new FakeThreadPoolClusterManagerService( "test", "clusterManagerService", threadPool, @@ -208,7 +208,7 @@ protected void requestCount++; // ClusterInfoService handles ClusterBlockExceptions quietly, so we invent such an exception to avoid excess logging listener.onFailure( - new ClusterBlockException(org.opensearch.common.collect.Set.of(NoMasterBlockService.NO_MASTER_BLOCK_ALL)) + new ClusterBlockException(org.opensearch.common.collect.Set.of(NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL)) ); } else { fail("unexpected action: " + action.name()); diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java index cf34af718c660..a61d878d90c7c 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java @@ -39,7 +39,7 @@ import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.action.shard.ShardStateAction.FailedShardEntry; import org.opensearch.cluster.action.shard.ShardStateAction.StartedShardEntry; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; @@ -256,7 +256,7 @@ public void testClusterManagerChannelException() throws InterruptedException { if 
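// [Editorial illustration, not part of this patch] This test leans on the retry contract from ShardStateAction above: exceptions listed in CLUSTER_MANAGER_CHANNEL_EXCEPTIONS, which now includes NotClusterManagerException, signal a wrong or missing cluster-manager, so the sender waits for a new election and resends instead of failing the request:
//
//     // retryable: the sender waits for a newly elected cluster-manager, then resends
//     transport.handleRemoteError(requestId, new NotClusterManagerException("simulated"));
//     // any exception outside that list is instead propagated to the shard-state listener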
(randomBoolean()) { transport.handleRemoteError( requestId, - randomFrom(new NotMasterException("simulated"), new FailedToCommitClusterStateException("simulated")) + randomFrom(new NotClusterManagerException("simulated"), new FailedToCommitClusterStateException("simulated")) ); } else { if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index 44239fdc0883f..b9ae08775d11a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -82,10 +82,10 @@ import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_INTERVAL_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ALL; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index b53cf9ddf1dd2..a3c945cdbac3a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -37,7 +37,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.NotMasterException; +import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; @@ -186,7 +186,7 @@ public void testFailedJoinAttemptLogLevel() { assertThat( JoinHelper.FailedJoinAttempt.getLogLevel( - new RemoteTransportException("caused by NotMasterException", new NotMasterException("test")) + new RemoteTransportException("caused by NotClusterManagerException", new NotClusterManagerException("test")) ), is(Level.DEBUG) ); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceRenamedSettingTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java similarity index 62% rename from 
server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceRenamedSettingTests.java rename to server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java index 2c988cad5f79c..42a230d05cbd4 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceRenamedSettingTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java @@ -21,7 +21,7 @@ * after it is deprecated, so that the backwards compatibility is maintained. * The test can be removed along with removing support of the deprecated setting. */ -public class NoMasterBlockServiceRenamedSettingTests extends OpenSearchTestCase { +public class NoClusterManagerBlockServiceRenamedSettingTests extends OpenSearchTestCase { /** * Validate the both settings are known and supported. @@ -31,7 +31,10 @@ public void testReindexSettingsExist() { assertTrue( "Both 'cluster.no_cluster_manager_block' and its predecessor should be supported built-in settings.", settings.containsAll( - Arrays.asList(NoMasterBlockService.NO_MASTER_BLOCK_SETTING, NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING) + Arrays.asList( + NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING + ) ) ); } @@ -41,8 +44,8 @@ public void testReindexSettingsExist() { */ public void testSettingFallback() { assertEquals( - NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(Settings.EMPTY), - NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(Settings.EMPTY) + NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING.get(Settings.EMPTY), + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(Settings.EMPTY) ); } @@ -52,10 +55,13 @@ public void testSettingFallback() { */ public void testSettingGetValue() { Settings settings = Settings.builder().put("cluster.no_cluster_manager_block", "all").build(); - assertEquals(NoMasterBlockService.NO_MASTER_BLOCK_ALL, NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings)); assertEquals( - NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getDefault(Settings.EMPTY), - NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(settings) + NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) + ); + assertEquals( + NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING.getDefault(Settings.EMPTY), + NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING.get(settings) ); } @@ -65,10 +71,10 @@ public void testSettingGetValue() { public void testSettingGetValueWithFallback() { Settings settings = Settings.builder().put("cluster.no_master_block", "metadata_write").build(); assertEquals( - NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES, - NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) + NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) ); - assertSettingDeprecationsAndWarnings(new Setting[] { NoMasterBlockService.NO_MASTER_BLOCK_SETTING }); + assertSettingDeprecationsAndWarnings(new Setting[] { NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING }); } /** @@ -79,9 +85,15 @@ public void testSettingGetValueWhenBothAreConfigured() { .put("cluster.no_cluster_manager_block", "all") .put("cluster.no_master_block", "metadata_write") .build(); - assertEquals(NoMasterBlockService.NO_MASTER_BLOCK_ALL, 
NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings)); - assertEquals(NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES, NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(settings)); - assertSettingDeprecationsAndWarnings(new Setting[] { NoMasterBlockService.NO_MASTER_BLOCK_SETTING }); + assertEquals( + NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) + ); + assertEquals( + NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES, + NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING.get(settings) + ); + assertSettingDeprecationsAndWarnings(new Setting[] { NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING }); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java similarity index 85% rename from server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java rename to server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java index ed8027dc8e838..b9a72e00aebb0 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java @@ -35,21 +35,21 @@ import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ALL; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; import static org.opensearch.common.settings.ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; import static org.hamcrest.Matchers.sameInstance; -public class NoMasterBlockServiceTests extends OpenSearchTestCase { +public class NoClusterManagerBlockServiceTests extends OpenSearchTestCase { - private NoMasterBlockService noClusterManagerBlockService; + private NoClusterManagerBlockService noClusterManagerBlockService; private ClusterSettings clusterSettings; private void createService(Settings settings) { clusterSettings = new ClusterSettings(settings, BUILT_IN_CLUSTER_SETTINGS); - noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings); + noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); } private void assertDeprecatedWarningEmitted() { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 2cf8c2c13d3b6..3606d24222679 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -43,9 +43,9 @@ import 
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; -import org.opensearch.cluster.service.MasterService; -import org.opensearch.cluster.service.MasterServiceTests; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; +import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterManagerServiceTests; import org.opensearch.common.Randomness; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -98,7 +98,7 @@ public class NodeJoinTests extends OpenSearchTestCase { private static ThreadPool threadPool; - private MasterService clusterManagerService; + private ClusterManagerService clusterManagerService; private Coordinator coordinator; private DeterministicTaskQueue deterministicTaskQueue; private Transport transport; @@ -144,7 +144,7 @@ private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterStat random() ); final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool(); - FakeThreadPoolMasterService fakeClusterManagerService = new FakeThreadPoolMasterService( + FakeThreadPoolClusterManagerService fakeClusterManagerService = new FakeThreadPoolClusterManagerService( "test_node", "test", fakeThreadPool, @@ -166,7 +166,7 @@ private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterStat } private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterState initialState) { - MasterService clusterManagerService = new MasterService( + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool @@ -191,13 +191,13 @@ private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterStat private void setupClusterManagerServiceAndCoordinator( long term, ClusterState initialState, - MasterService clusterManagerService, + ClusterManagerService clusterManagerService, ThreadPool threadPool, Random random, NodeHealthService nodeHealthService ) { if (this.clusterManagerService != null || coordinator != null) { - throw new IllegalStateException("method setupMasterServiceAndCoordinator can only be called once"); + throw new IllegalStateException("method setupClusterManagerServiceAndCoordinator can only be called once"); } this.clusterManagerService = clusterManagerService; CapturingTransport capturingTransport = new CapturingTransport() { @@ -514,7 +514,7 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ); assertTrue( - MasterServiceTests.discoveryState(clusterManagerService) + ClusterManagerServiceTests.discoveryState(clusterManagerService) .getVotingConfigExclusions() .stream() .anyMatch( @@ -746,7 +746,7 @@ public void testConcurrentJoining() { throw new RuntimeException(e); } - assertTrue(MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); + assertTrue(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); for (DiscoveryNode successfulNode : successfulNodes) { assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode)); @@ -776,10 +776,10 
@@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { } private boolean isLocalNodeElectedMaster() { - return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); + return ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } private boolean clusterStateHasNode(DiscoveryNode node) { - return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); + return node.equals(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index 8a7e14c63d3b0..166fb7a935009 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -38,9 +38,9 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; -import org.opensearch.cluster.LocalNodeMasterListener; +import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -296,7 +296,7 @@ public void testLocalNodeClusterManagerListenerCallbacks() { TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false); AtomicBoolean isClusterManager = new AtomicBoolean(); - timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() { + timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeClusterManagerListener() { @Override public void onMaster() { isClusterManager.set(true); @@ -318,7 +318,7 @@ public void offMaster() { nodes = state.nodes(); nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null); state = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)) + .blocks(ClusterBlocks.builder().addGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)) .nodes(nodesBuilder) .build(); setState(timedClusterApplierService, state); diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceRenamedSettingTests.java similarity index 68% rename from server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java rename to server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceRenamedSettingTests.java index acf089dc43b56..e71d872c87527 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceRenamedSettingTests.java @@ -22,7 +22,7 @@ * after it is deprecated, so that the backwards compatibility is maintained. * The test can be removed along with removing support of the deprecated setting. 
*/ -public class MasterServiceRenamedSettingTests extends OpenSearchTestCase { +public class ClusterManagerServiceRenamedSettingTests extends OpenSearchTestCase { /** * Validate the both settings are known and supported. @@ -33,8 +33,8 @@ public void testClusterManagerServiceSettingsExist() { "Both 'cluster.service.slow_cluster_manager_task_logging_threshold' and its predecessor should be supported built-in settings", settings.containsAll( Arrays.asList( - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING + ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING ) ) ); @@ -45,8 +45,8 @@ public void testClusterManagerServiceSettingsExist() { */ public void testSettingFallback() { assertEquals( - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY) + ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY), + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY) ); } @@ -57,11 +57,11 @@ public void testSettingGetValue() { Settings settings = Settings.builder().put("cluster.service.slow_cluster_manager_task_logging_threshold", "9s").build(); assertEquals( TimeValue.timeValueSeconds(9), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); assertEquals( - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getDefault(Settings.EMPTY), - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getDefault(Settings.EMPTY), + ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); } @@ -73,10 +73,10 @@ public void testSettingGetValueWithFallback() { Settings settings = Settings.builder().put("cluster.service.slow_master_task_logging_threshold", "8s").build(); assertEquals( TimeValue.timeValueSeconds(8), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); - assertSettingDeprecationsAndWarnings(new Setting[] { MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); + assertSettingDeprecationsAndWarnings(new Setting[] { ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); } /** @@ -89,11 +89,11 @@ public void testSettingGetValueWhenBothAreConfigured() { .build(); assertEquals( TimeValue.timeValueSeconds(9), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); - assertEquals(TimeValue.timeValueSeconds(8), MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings)); - assertSettingDeprecationsAndWarnings(new Setting[] { MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); + assertEquals(TimeValue.timeValueSeconds(8), ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings)); + assertSettingDeprecationsAndWarnings(new Setting[] { 
ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); } } diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java similarity index 92% rename from server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java rename to server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java index d5f7344c544b9..7c7a725032b52 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java @@ -93,14 +93,14 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; -public class MasterServiceTests extends OpenSearchTestCase { +public class ClusterManagerServiceTests extends OpenSearchTestCase { private static ThreadPool threadPool; private static long relativeTimeInMillis; @BeforeClass public static void createThreadPool() { - threadPool = new TestThreadPool(MasterServiceTests.class.getName()) { + threadPool = new TestThreadPool(ClusterManagerServiceTests.class.getName()) { @Override public long relativeTimeInMillis() { return relativeTimeInMillis; @@ -121,17 +121,17 @@ public void randomizeCurrentTime() { relativeTimeInMillis = randomLongBetween(0L, 1L << 62); } - private MasterService createClusterManagerService(boolean makeClusterManager) { + private ClusterManagerService createClusterManagerService(boolean makeClusterManager) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - final MasterService clusterManagerService = new MasterService( + final ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName())) .nodes( DiscoveryNodes.builder() .add(localNode) @@ -151,7 +151,7 @@ private MasterService createClusterManagerService(boolean makeClusterManager) { } public void testClusterManagerAwareExecution() throws Exception { - final MasterService nonClusterManager = createClusterManagerService(false); + final ClusterManagerService nonClusterManager = createClusterManagerService(false); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); @@ -194,7 +194,7 @@ public void onFailure(String source, Exception e) { } public void testThreadContext() throws InterruptedException { - final MasterService clusterManager = createClusterManagerService(true); + final ClusterManagerService clusterManager = createClusterManagerService(true); final CountDownLatch latch = new CountDownLatch(1); try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { @@ -292,7 +292,7 @@ public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws Interru final CountDownLatch latch = new CountDownLatch(1); AtomicBoolean published = new 
AtomicBoolean(); - try (MasterService clusterManagerService = createClusterManagerService(true)) { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { clusterManagerService.submitStateUpdateTask( "testClusterStateTaskListenerThrowingExceptionIsOkay", new Object(), @@ -328,11 +328,11 @@ public void onFailure(String source, Exception e) {} @TestLogging(value = "org.opensearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test1]" ) @@ -340,7 +340,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [1s] to compute cluster state update for [test1]" ) @@ -348,7 +348,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [0s] to notify listeners on unchanged cluster state for [test1]" ) @@ -357,7 +357,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test2]" ) @@ -365,7 +365,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 failure", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.TRACE, "failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*" ) @@ -373,7 +373,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [2s] to compute cluster state update for [test2]" ) @@ -381,7 +381,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [0s] to notify listeners on unchanged cluster state for [test2]" ) @@ -390,7 +390,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test3]" ) @@ -398,7 +398,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new 
MockLogAppender.SeenEventExpectation( "test3 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [3s] to compute cluster state update for [test3]" ) @@ -406,7 +406,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]" ) @@ -415,13 +415,13 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test4]" ) ); - try (MasterService clusterManagerService = createClusterManagerService(true)) { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -617,7 +617,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }; - try (MasterService clusterManagerService = createClusterManagerService(true)) { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { final ConcurrentMap submittedTasksPerThread = new ConcurrentHashMap<>(); CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) { @@ -696,7 +696,7 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted final CountDownLatch latch = new CountDownLatch(1); final AtomicReference assertionRef = new AtomicReference<>(); - try (MasterService clusterManagerService = createClusterManagerService(true)) { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { clusterManagerService.submitStateUpdateTask( "testBlockingCallInClusterStateTaskListenerFails", new Object(), @@ -737,11 +737,11 @@ public void onFailure(String source, Exception e) {} @TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test1 shouldn't log because it was fast enough", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took*test1*" ) @@ -749,7 +749,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took [*], which is over [10s], to compute cluster state update for [test2]" ) @@ -757,7 +757,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3", - MasterService.class.getCanonicalName(), + 
ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took [*], which is over [10s], to compute cluster state update for [test3]" ) @@ -765,7 +765,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took [*], which is over [10s], to compute cluster state update for [test4]" ) @@ -773,7 +773,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test5 should not log despite publishing slowly", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took*test5*" ) @@ -781,16 +781,16 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test6 should log due to slow and failing publication", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [test6]:*" ) ); try ( - MasterService clusterManagerService = new MasterService( + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -805,19 +805,21 @@ public void testLongClusterStateUpdateLogging() throws Exception { emptySet(), Version.CURRENT ); - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder( + new ClusterName(ClusterManagerServiceTests.class.getSimpleName()) + ) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState); clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { if (event.source().contains("test5")) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); } if (event.source().contains("test6")) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new OpenSearchException("simulated error during slow publication which should trigger logging"); @@ -835,7 +837,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += randomLongBetween( 0L, - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() +
ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() ); return currentState; } @@ -856,7 +858,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); @@ -875,7 +877,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return ClusterState.builder(currentState).incrementVersion().build(); @@ -894,7 +896,7 @@ public void onFailure(String source, Exception e) { clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return currentState; @@ -971,9 +973,9 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); try ( - MasterService clusterManagerService = new MasterService( + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -981,7 +983,7 @@ public void testAcking() throws InterruptedException { ) ) { - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); @@ -1101,7 +1103,7 @@ public void onAckTimeout() { /** * Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer) */ - public static ClusterState discoveryState(MasterService clusterManagerService) { + public static ClusterState discoveryState(ClusterManagerService clusterManagerService) { return clusterManagerService.state(); } diff --git 
a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index efcefab6c9f8b..5588e9c1ceba8 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -37,7 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.service.ClusterApplier; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; @@ -70,7 +70,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private TransportService transportService; private NamedWriteableRegistry namedWriteableRegistry; - private MasterService clusterManagerService; + private ClusterManagerService clusterManagerService; private ClusterApplier clusterApplier; private ThreadPool threadPool; private ClusterSettings clusterSettings; @@ -93,7 +93,7 @@ public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - clusterManagerService = mock(MasterService.class); + clusterManagerService = mock(ClusterManagerService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index d38d31f3ef43b..b686641eef6c3 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -47,7 +47,7 @@ import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -369,20 +369,22 @@ public ClusterState randomlyUpdateClusterState( Supplier indicesServiceSupplier ) { // randomly remove no_cluster_manager blocks - if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) { + if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)) { state = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) + .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)) .build(); } // randomly add no_cluster_manager blocks - if (rarely() && 
state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false) { - ClusterBlock block = randomBoolean() ? NoMasterBlockService.NO_MASTER_BLOCK_ALL : NoMasterBlockService.NO_MASTER_BLOCK_WRITES; + if (rarely() && state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID) == false) { + ClusterBlock block = randomBoolean() + ? NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL + : NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build(); } // if no_cluster_manager block is in place, make no other cluster state changes - if (state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) { + if (state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)) { return state; } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 9558e898f8832..6faaa6973953c 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -144,8 +144,8 @@ import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.NamedWriteableRegistry; @@ -1643,7 +1643,7 @@ private final class TestClusterNode { private final DiscoveryNode node; - private final MasterService clusterManagerService; + private final ClusterManagerService clusterManagerService; private final AllocationService allocationService; @@ -1663,7 +1663,7 @@ private final class TestClusterNode { this.node = node; final Environment environment = createEnvironment(node.getName()); threadPool = deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)); - clusterManagerService = new FakeThreadPoolMasterService( + clusterManagerService = new FakeThreadPoolClusterManagerService( node.getName(), "test", threadPool, diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index e341a330e2c4c..0e0b466408f77 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -54,7 +54,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; import org.opensearch.common.UUIDs; @@ -139,7 +139,7 @@ import static 
org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_INTERVAL_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; -import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ID; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ID; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -1510,7 +1510,7 @@ int getSuccessfulAckIndex(ClusterNode clusterNode) { } } - static class AckedFakeThreadPoolClusterManagerService extends FakeThreadPoolMasterService { + static class AckedFakeThreadPoolClusterManagerService extends FakeThreadPoolClusterManagerService { AckCollector nextAckCollector = new AckCollector(); diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java similarity index 96% rename from test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java rename to test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index c532a0cc36472..46893fe08bd76 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -55,8 +55,8 @@ import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomInt; -public class FakeThreadPoolMasterService extends MasterService { - private static final Logger logger = LogManager.getLogger(FakeThreadPoolMasterService.class); +public class FakeThreadPoolClusterManagerService extends ClusterManagerService { + private static final Logger logger = LogManager.getLogger(FakeThreadPoolClusterManagerService.class); private final String name; private final List<Runnable> pendingTasks = new ArrayList<>(); @@ -65,7 +65,7 @@ public class FakeThreadPoolMasterService extends MasterService { private boolean taskInProgress = false; private boolean waitForPublish = false; - public FakeThreadPoolMasterService( + public FakeThreadPoolClusterManagerService( String nodeName, String serviceName, ThreadPool threadPool, @@ -137,7 +137,7 @@ public void run() { if (waitForPublish == false) { taskInProgress = false; } - FakeThreadPoolMasterService.this.scheduleNextTaskIfNecessary(); + FakeThreadPoolClusterManagerService.this.scheduleNextTaskIfNecessary(); } }); } diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index f709d8bcaff34..93c437d3cf91b 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -47,7 +47,7 @@ import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.MasterService; +import
org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; @@ -61,8 +61,8 @@ public class ClusterServiceUtils { - public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { - MasterService clusterManagerService = new MasterService( + public static ClusterManagerService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool @@ -77,7 +77,7 @@ public static MasterService createMasterService(ThreadPool threadPool, ClusterSt return clusterManagerService; } - public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { + public static ClusterManagerService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) @@ -114,7 +114,7 @@ public void onFailure(String source, Exception e) { } } - public static void setState(MasterService executor, ClusterState clusterState) { + public static void setState(ClusterManagerService executor, ClusterState clusterState) { CountDownLatch latch = new CountDownLatch(1); executor.submitStateUpdateTask("test setting state", new ClusterStateUpdateTask() { @Override diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 637dff1d8169f..93f0bf8cb6190 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -55,7 +55,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.action.index.MappingUpdatedAction; import org.opensearch.cluster.coordination.ClusterBootstrapService; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -434,7 +434,7 @@ public InternalTestCluster( ); // TODO: currently we only randomize "cluster.no_cluster_manager_block" between "write" and "metadata_write", as "all" is fragile // and fails shards when a cluster-manager abdicates, which breaks many tests. 
- builder.put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write")); + builder.put(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write")); defaultSettings = builder.build(); executor = OpenSearchExecutors.newScaling( "internal_test_cluster_executor", diff --git a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java index 8beac9e441787..a0227c51c8085 100644 --- a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java @@ -43,7 +43,7 @@ import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo; import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo.Failure; import org.opensearch.cluster.block.ClusterBlockException; -import org.opensearch.cluster.coordination.NoMasterBlockService; +import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; @@ -331,7 +331,7 @@ private static Tuple<Failure, Failure> randomShardInfoFailure(Random random) { int type = randomIntBetween(random, 0, 3); switch (type) { case 0: - actualException = new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)); + actualException = new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)); expectedException = new OpenSearchException( "OpenSearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];]" diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java similarity index 96% rename from test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java rename to test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java index 85f8e5c250066..8edb508625341 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java @@ -43,11 +43,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -public class BlockMasterServiceOnMaster extends SingleNodeDisruption { +public class BlockClusterManagerServiceOnClusterManager extends SingleNodeDisruption { AtomicReference<CountDownLatch> disruptionLatch = new AtomicReference<>(); - public BlockMasterServiceOnMaster(Random random) { + public BlockClusterManagerServiceOnClusterManager(Random random) { super(random); } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java similarity index 95% rename from test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java rename to test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java index 4830f9b0359fb..55eb90a4dc89d 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java +++
b/test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java @@ -41,11 +41,11 @@ import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; -public class BusyMasterServiceDisruption extends SingleNodeDisruption { +public class BusyClusterManagerServiceDisruption extends SingleNodeDisruption { private final AtomicBoolean active = new AtomicBoolean(); private final Priority priority; - public BusyMasterServiceDisruption(Random random, Priority priority) { + public BusyClusterManagerServiceDisruption(Random random, Priority priority) { super(random); this.priority = priority; } diff --git a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java similarity index 97% rename from test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java rename to test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java index dabeeb6742ec4..f09e62fa574e6 100644 --- a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java +++ b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java @@ -61,7 +61,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase { +public class FakeThreadPoolClusterManagerServiceTests extends OpenSearchTestCase { public void testFakeClusterManagerService() { List<Runnable> runnableTasks = new ArrayList<>(); @@ -84,7 +84,7 @@ public void testFakeClusterManagerService() { doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any()); when(mockThreadPool.generic()).thenReturn(executorService); - FakeThreadPoolMasterService clusterManagerService = new FakeThreadPoolMasterService( + FakeThreadPoolClusterManagerService clusterManagerService = new FakeThreadPoolClusterManagerService( "test_node", "test", mockThreadPool, From dda0876bc8f6537f410f946cd7ba255fc9c2bb85 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 12 Jul 2022 16:09:31 -0700 Subject: [PATCH 004/104] Add badges for gradle check, gha workflow, documentation & forum (#3863) * Add badge labels for gradle check, gha workflow, documentation & forum Signed-off-by: Suraj Singh * Update public jenkins gradle check label Signed-off-by: Suraj Singh --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 4ae514baa789d..5c5366b209fc9 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,10 @@ [![codecov](https://codecov.io/gh/opensearch-project/OpenSearch/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/OpenSearch) +[![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) +[![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) +[![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) +[![Jenkins gradle check
job](https://img.shields.io/jenkins/build?jobUrl=https%3A%2F%2Fbuild.ci.opensearch.org%2Fjob%2Fgradle-check%2F&label=Jenkins%20Gradle%20Check)](https://build.ci.opensearch.org/job/gradle-check/) +[![Documentation](https://img.shields.io/badge/doc-reference-blue)](https://opensearch.org/docs/latest/opensearch/index/) +[![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://forum.opensearch.org/c/opensearch/) From b067d2aa57c2a8039facc0cb96bc99f0ddc3d5e8 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 12 Jul 2022 22:39:55 -0400 Subject: [PATCH 005/104] Update Netty to 4.1.79.Final (#3868) Signed-off-by: Andriy Redko --- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.78.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.78.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.78.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.79.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.78.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.78.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.78.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.79.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.78.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.78.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.78.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.78.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.79.Final.jar.sha1 | 1 + 45 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.78.Final.jar.sha1 create mode 
100644 modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.78.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 delete 
mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.78.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fe1ff2782274f..75514f9738190 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ asm = 9.3 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.78.Final +netty = 4.1.79.Final joda = 2.10.12 # client dependencies diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.78.Final.jar.sha1 deleted file mode 100644 index d32cb7202ac0c..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c0c2d805a2c8ea30223677d8219235f9ec14c38 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9e4d0b7f754 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.78.Final.jar.sha1 deleted file mode 100644 index 801c8d5e74de2..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8486c8923fc0914df4562a8e0923462e885f88a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c0920231d79a8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.78.Final.jar.sha1 deleted file mode 100644 index 473c81d37a969..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b636591e973418479f2d08e881b12be61845aa6f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a3f650da5abbd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.78.Final.jar.sha1 deleted file mode 100644 index 3e2d3ec9e5106..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6f560098050f88ba11750aa856edd955e4a7707 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..faa7b099406a3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.78.Final.jar.sha1 deleted file mode 100644 index f597dcc2030a7..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e902b6018378bb700ec9364b1d0fba6eefd99fd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e314f164da69 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.78.Final.jar.sha1 deleted file mode 100644 index 88a70642001e5..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -691170d70a979757d50f60e16121ced5f411a9b8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..af550935bb911 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.78.Final.jar.sha1 deleted file mode 100644 index faa5512a44973..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1639d431e43622d6cbfdd45c30d3fb810fa9101 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c6e18efb3ad3d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6cc2b49749b4fbcc39c687027e04e65e857552a9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 deleted file mode 100644 index 58224c6d6bddb..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62c64e182a11e31ad80e68a94f0b02b45344df23 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..7f984663dfa85 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/netty-codec-dns-4.1.78.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.78.Final.jar.sha1 deleted file mode 100644 index 6bdf8e35a1229..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6b51ce00cfe34adbe93171088f1c7fbd2a517308 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a1753b194ea31 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6c19c46f9529791964f636c93cfaca0556f0d5d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.78.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.78.Final.jar.sha1 deleted file mode 100644 index 73f234d843204..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a4e44bebe5979b59c9bf4cd879e65c4c52bf869 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..f2989024cfce1 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +0eeffab0cd5efb699d5e4ab9b694d32fef6694b3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.78.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.78.Final.jar.sha1 deleted file mode 100644 index ba6d08c14941c..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a8b9c0e9819e4e2c6c938e8f079683a65d85faf \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..913f0e7685c86 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +794a5937cdb1871c4ae350610752dec2929dc1d6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.78.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.78.Final.jar.sha1 deleted file mode 100644 index 86e407b5a171d..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f9e5ad080d1d1a9a82ba598814e6f5a229db4594 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..dbb072f3f665f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +054aace8683de7893cf28d4aab72cd60f49b5700 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.78.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.78.Final.jar.sha1 deleted file mode 100644 index f85de54545500..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-84fc82d8ff839d85c81979a5dc2525857b9a8837 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a5d1be00d9c29 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +8eb9be9b6a66a03f5f4df67fe559cb676493d167 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 deleted file mode 100644 index 58224c6d6bddb..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62c64e182a11e31ad80e68a94f0b02b45344df23 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..7f984663dfa85 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +731937caec938b77b39df932a8da8aaca8d5ec05 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.78.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.78.Final.jar.sha1 deleted file mode 100644 index a32faf2eba6cc..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c64dedb62484d9fe9dfe1d1e877ef480fd1d0957 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..724950db96f09 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +1c53cffaa14d61de523b167377843e35807292a7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.78.Final.jar.sha1 deleted file mode 100644 index d32cb7202ac0c..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c0c2d805a2c8ea30223677d8219235f9ec14c38 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9e4d0b7f754 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +6c014412b599489b1db27c6bc08d8a46da94e397 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.78.Final.jar.sha1 deleted file mode 100644 index 801c8d5e74de2..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8486c8923fc0914df4562a8e0923462e885f88a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..c0920231d79a8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ 
+18f5b02af7ca611978bc28f2cb58cbb3b9b0f0ef \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.78.Final.jar.sha1 deleted file mode 100644 index 473c81d37a969..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b636591e973418479f2d08e881b12be61845aa6f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..a3f650da5abbd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +882c70bc0a30a98bf3ce477f043e967ac026044c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.78.Final.jar.sha1 deleted file mode 100644 index 3e2d3ec9e5106..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6f560098050f88ba11750aa856edd955e4a7707 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..faa7b099406a3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2814bd465731355323aba0fdd22163bfce638a75 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.78.Final.jar.sha1 deleted file mode 100644 index f597dcc2030a7..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e902b6018378bb700ec9364b1d0fba6eefd99fd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..8e314f164da69 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +2dc22423c8ed19906615fb936a5fcb7db14a4e6c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.78.Final.jar.sha1 deleted file mode 100644 index 88a70642001e5..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -691170d70a979757d50f60e16121ced5f411a9b8 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 new file mode 100644 index 0000000000000..af550935bb911 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.79.Final.jar.sha1 @@ -0,0 +1 @@ +55ecb1ff4464b56564a90824a741c3911264aaa4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.78.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.78.Final.jar.sha1 deleted file mode 100644 index faa5512a44973..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.78.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1639d431e43622d6cbfdd45c30d3fb810fa9101 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
new file mode 100644
index 0000000000000..c6e18efb3ad3d
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.79.Final.jar.sha1
@@ -0,0 +1 @@
+6cc2b49749b4fbcc39c687027e04e65e857552a9
\ No newline at end of file

From 2763e8086565d949f5cfdbe193539e0f8d37748a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 12 Jul 2022 22:41:30 -0400
Subject: [PATCH 006/104] Bump com.diffplug.spotless from 6.7.0 to 6.8.0 (#3761)

Bumps com.diffplug.spotless from 6.7.0 to 6.8.0.

---
updated-dependencies:
- dependency-name: com.diffplug.spotless
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build.gradle b/build.gradle
index 50e96537f7c83..73dd3ca9a8157 100644
--- a/build.gradle
+++ b/build.gradle
@@ -48,7 +48,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
   id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "6.7.0" apply false
+  id "com.diffplug.spotless" version "6.8.0" apply false
   id "org.gradle.test-retry" version "1.4.0" apply false
   id "test-report-aggregation"
   id 'jacoco-report-aggregation'

From f7894f7f29c384631503a2f76e3a4a88d9b61125 Mon Sep 17 00:00:00 2001
From: "Daniel (dB.) Doubrovkine"
Date: Wed, 13 Jul 2022 14:43:24 -0300
Subject: [PATCH 007/104] Add option to disable chunked transfer-encoding (#3864)

* fix for bug: https://github.com/opensearch-project/OpenSearch/issues/3640
  Signed-off-by: Jitendra Kumar
* fix for bug: https://github.com/opensearch-project/OpenSearch/issues/3640
  Signed-off-by: Jitendra Kumar
* fix for bug: https://github.com/opensearch-project/OpenSearch/issues/3640
  Signed-off-by: Jitendra Kumar
* fix for bug: https://github.com/opensearch-project/OpenSearch/issues/3640
  Signed-off-by: Jitendra Kumar
* adding chunk support for non-compressed request
  Signed-off-by: Jitendra Kumar
* Disable chunked transfer encoding in integration tests.
  Signed-off-by: dblock
* Replace chunkedTransferEncodingEnabled with chunkedEnabled.
  Signed-off-by: Daniel (dB.) Doubrovkine
* Make chunkedEnabled optional.
  Signed-off-by: Daniel (dB.) Doubrovkine
* Remove Optional constructor.
  Signed-off-by: Daniel (dB.) Doubrovkine
* Remove optionals from constructors.
  Signed-off-by: Daniel (dB.) Doubrovkine
* Use Optional.empty()
  Signed-off-by: Daniel (dB.) Doubrovkine
* Use a more idiomatic syntax in isChunked.
  Signed-off-by: Daniel (dB.)
Doubrovkine Co-authored-by: Jitendra Kumar --- .../org/opensearch/client/RestClient.java | 142 +++++++++++++-- .../opensearch/client/RestClientBuilder.java | 51 ++++-- .../client/RestClientCompressionTests.java | 165 ++++++++++++++++++ .../client/RestClientSingleHostTests.java | 1 + 4 files changed, 337 insertions(+), 22 deletions(-) create mode 100644 client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 4f899fd709112..92aed2c8fb179 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -36,6 +36,7 @@ import org.apache.http.ConnectionClosedException; import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.HttpEntityWrapper; import org.apache.http.HttpHost; import org.apache.http.HttpRequest; import org.apache.http.HttpResponse; @@ -131,6 +132,29 @@ public class RestClient implements Closeable { private volatile NodeTuple> nodeTuple; private final WarningsHandler warningsHandler; private final boolean compressionEnabled; + private final Optional chunkedEnabled; + + RestClient( + CloseableHttpAsyncClient client, + Header[] defaultHeaders, + List nodes, + String pathPrefix, + FailureListener failureListener, + NodeSelector nodeSelector, + boolean strictDeprecationMode, + boolean compressionEnabled, + boolean chunkedEnabled + ) { + this.client = client; + this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders)); + this.failureListener = failureListener; + this.pathPrefix = pathPrefix; + this.nodeSelector = nodeSelector; + this.warningsHandler = strictDeprecationMode ? WarningsHandler.STRICT : WarningsHandler.PERMISSIVE; + this.compressionEnabled = compressionEnabled; + this.chunkedEnabled = Optional.of(chunkedEnabled); + setNodes(nodes); + } RestClient( CloseableHttpAsyncClient client, @@ -149,6 +173,7 @@ public class RestClient implements Closeable { this.nodeSelector = nodeSelector; this.warningsHandler = strictDeprecationMode ? 
WarningsHandler.STRICT : WarningsHandler.PERMISSIVE; this.compressionEnabled = compressionEnabled; + this.chunkedEnabled = Optional.empty(); setNodes(nodes); } @@ -583,36 +608,42 @@ private static void addSuppressedException(Exception suppressedException, Except } } - private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity, boolean compressionEnabled) { + private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { switch (method.toUpperCase(Locale.ROOT)) { case HttpDeleteWithEntity.METHOD_NAME: - return addRequestBody(new HttpDeleteWithEntity(uri), entity, compressionEnabled); + return addRequestBody(new HttpDeleteWithEntity(uri), entity); case HttpGetWithEntity.METHOD_NAME: - return addRequestBody(new HttpGetWithEntity(uri), entity, compressionEnabled); + return addRequestBody(new HttpGetWithEntity(uri), entity); case HttpHead.METHOD_NAME: - return addRequestBody(new HttpHead(uri), entity, compressionEnabled); + return addRequestBody(new HttpHead(uri), entity); case HttpOptions.METHOD_NAME: - return addRequestBody(new HttpOptions(uri), entity, compressionEnabled); + return addRequestBody(new HttpOptions(uri), entity); case HttpPatch.METHOD_NAME: - return addRequestBody(new HttpPatch(uri), entity, compressionEnabled); + return addRequestBody(new HttpPatch(uri), entity); case HttpPost.METHOD_NAME: HttpPost httpPost = new HttpPost(uri); - addRequestBody(httpPost, entity, compressionEnabled); + addRequestBody(httpPost, entity); return httpPost; case HttpPut.METHOD_NAME: - return addRequestBody(new HttpPut(uri), entity, compressionEnabled); + return addRequestBody(new HttpPut(uri), entity); case HttpTrace.METHOD_NAME: - return addRequestBody(new HttpTrace(uri), entity, compressionEnabled); + return addRequestBody(new HttpTrace(uri), entity); default: throw new UnsupportedOperationException("http method not supported: " + method); } } - private static HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity, boolean compressionEnabled) { + private HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) { if (entity != null) { if (httpRequest instanceof HttpEntityEnclosingRequestBase) { if (compressionEnabled) { - entity = new ContentCompressingEntity(entity); + if (chunkedEnabled.isPresent()) { + entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); + } else { + entity = new ContentCompressingEntity(entity); + } + } else if (chunkedEnabled.isPresent()) { + entity = new ContentHttpEntity(entity, chunkedEnabled.get()); } ((HttpEntityEnclosingRequestBase) httpRequest).setEntity(entity); } else { @@ -782,7 +813,7 @@ private class InternalRequest { String ignoreString = params.remove("ignore"); this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); URI uri = buildUri(pathPrefix, request.getEndpoint(), params); - this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity(), compressionEnabled); + this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity()); this.cancellable = Cancellable.fromRequest(httpRequest); setHeaders(httpRequest, request.getOptions().getHeaders()); setRequestConfig(httpRequest, request.getOptions().getRequestConfig()); @@ -936,6 +967,7 @@ private static Exception extractAndWrapCause(Exception exception) { * A gzip compressing entity that also implements {@code getContent()}. 
*/ public static class ContentCompressingEntity extends GzipCompressingEntity { + private Optional chunkedEnabled; /** * Creates a {@link ContentCompressingEntity} instance with the provided HTTP entity. @@ -944,6 +976,18 @@ public static class ContentCompressingEntity extends GzipCompressingEntity { */ public ContentCompressingEntity(HttpEntity entity) { super(entity); + this.chunkedEnabled = Optional.empty(); + } + + /** + * Creates a {@link ContentCompressingEntity} instance with the provided HTTP entity. + * + * @param entity the HTTP entity. + * @param chunkedEnabled force enable/disable chunked transfer-encoding. + */ + public ContentCompressingEntity(HttpEntity entity, boolean chunkedEnabled) { + super(entity); + this.chunkedEnabled = Optional.of(chunkedEnabled); } @Override @@ -954,6 +998,80 @@ public InputStream getContent() throws IOException { } return out.asInput(); } + + /** + * A gzip compressing entity doesn't work with chunked encoding with sigv4 + * + * @return false + */ + @Override + public boolean isChunked() { + return chunkedEnabled.orElseGet(super::isChunked); + } + + /** + * A gzip entity requires content length in http headers. + * + * @return content length of gzip entity + */ + @Override + public long getContentLength() { + if (chunkedEnabled.isPresent()) { + if (chunkedEnabled.get()) { + return -1L; + } else { + long size; + try (InputStream is = getContent()) { + size = is.readAllBytes().length; + } catch (IOException ex) { + size = -1L; + } + + return size; + } + } else { + return super.getContentLength(); + } + } + } + + /** + * An entity that lets the caller specify the return value of {@code isChunked()}. + */ + public static class ContentHttpEntity extends HttpEntityWrapper { + private Optional chunkedEnabled; + + /** + * Creates a {@link ContentHttpEntity} instance with the provided HTTP entity. + * + * @param entity the HTTP entity. + */ + public ContentHttpEntity(HttpEntity entity) { + super(entity); + this.chunkedEnabled = Optional.empty(); + } + + /** + * Creates a {@link ContentHttpEntity} instance with the provided HTTP entity. + * + * @param entity the HTTP entity. + * @param chunkedEnabled force enable/disable chunked transfer-encoding. + */ + public ContentHttpEntity(HttpEntity entity, boolean chunkedEnabled) { + super(entity); + this.chunkedEnabled = Optional.of(chunkedEnabled); + } + + /** + * A chunked entity requires transfer-encoding:chunked in http headers + * which requires isChunked to be true + * + * @return true + */ + @Override + public boolean isChunked() { + return chunkedEnabled.orElseGet(super::isChunked); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 0b259c7983ca5..8841d371754c3 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -46,6 +46,7 @@ import java.security.PrivilegedAction; import java.util.List; import java.util.Objects; +import java.util.Optional; /** * Helps creating a new {@link RestClient}. 
Allows to set the most common http client configuration options when internally @@ -84,6 +85,7 @@ public final class RestClientBuilder { private NodeSelector nodeSelector = NodeSelector.ANY; private boolean strictDeprecationMode = false; private boolean compressionEnabled = false; + private Optional chunkedEnabled; /** * Creates a new builder instance and sets the hosts that the client will send requests to. @@ -100,6 +102,7 @@ public final class RestClientBuilder { } } this.nodes = nodes; + this.chunkedEnabled = Optional.empty(); } /** @@ -238,6 +241,16 @@ public RestClientBuilder setCompressionEnabled(boolean compressionEnabled) { return this; } + /** + * Whether the REST client should use Transfer-Encoding: chunked for requests or not" + * + * @param chunkedEnabled force enable/disable chunked transfer-encoding. + */ + public RestClientBuilder setChunkedEnabled(boolean chunkedEnabled) { + this.chunkedEnabled = Optional.of(chunkedEnabled); + return this; + } + /** * Creates a new {@link RestClient} based on the provided configuration. */ @@ -248,16 +261,34 @@ public RestClient build() { CloseableHttpAsyncClient httpClient = AccessController.doPrivileged( (PrivilegedAction) this::createHttpClient ); - RestClient restClient = new RestClient( - httpClient, - defaultHeaders, - nodes, - pathPrefix, - failureListener, - nodeSelector, - strictDeprecationMode, - compressionEnabled - ); + + RestClient restClient = null; + + if (chunkedEnabled.isPresent()) { + restClient = new RestClient( + httpClient, + defaultHeaders, + nodes, + pathPrefix, + failureListener, + nodeSelector, + strictDeprecationMode, + compressionEnabled, + chunkedEnabled.get() + ); + } else { + restClient = new RestClient( + httpClient, + defaultHeaders, + nodes, + pathPrefix, + failureListener, + nodeSelector, + strictDeprecationMode, + compressionEnabled + ); + } + httpClient.start(); return restClient; } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java new file mode 100644 index 0000000000000..e8b7742044f67 --- /dev/null +++ b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
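For orientation, the builder change above is the whole public surface of the feature: compression and chunking are now independent toggles. A minimal usage sketch under stated assumptions — the host, port, index name, and document body are placeholders, and a cluster is assumed to be listening on localhost:9200; none of these come from the patch itself:

    import org.apache.http.HttpHost;
    import org.opensearch.client.Request;
    import org.opensearch.client.Response;
    import org.opensearch.client.RestClient;

    public class ChunkedOptOutExample {
        public static void main(String[] args) throws Exception {
            // Gzip request bodies, but send them with an explicit Content-Length
            // instead of Transfer-Encoding: chunked -- the combination that
            // request-signing schemes such as AWS sigv4 need.
            RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http"))
                .setCompressionEnabled(true)
                .setChunkedEnabled(false)
                .build();

            Request request = new Request("POST", "/my-index/_doc");
            request.setJsonEntity("{\"field\":\"value\"}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
            client.close();
        }
    }

Leaving setChunkedEnabled() uncalled preserves the old behavior exactly: the builder carries Optional.empty() through to the client, and the entity wrappers fall back to super.isChunked().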
+ */ + +package org.opensearch.client; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.CompletableFuture; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +public class RestClientCompressionTests extends RestClientTestCase { + + private static HttpServer httpServer; + + @BeforeClass + public static void startHttpServer() throws Exception { + httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.createContext("/", new GzipResponseHandler()); + httpServer.start(); + } + + @AfterClass + public static void stopHttpServers() throws IOException { + httpServer.stop(0); + httpServer = null; + } + + /** + * A response handler that accepts gzip-encoded data and replies request and response encoding values + * followed by the request body. The response is compressed if "Accept-Encoding" is "gzip". + */ + private static class GzipResponseHandler implements HttpHandler { + @Override + public void handle(HttpExchange exchange) throws IOException { + + // Decode body (if any) + String contentEncoding = exchange.getRequestHeaders().getFirst("Content-Encoding"); + String contentLength = exchange.getRequestHeaders().getFirst("Content-Length"); + InputStream body = exchange.getRequestBody(); + boolean compressedRequest = false; + if ("gzip".equals(contentEncoding)) { + body = new GZIPInputStream(body); + compressedRequest = true; + } + byte[] bytes = readAll(body); + boolean compress = "gzip".equals(exchange.getRequestHeaders().getFirst("Accept-Encoding")); + if (compress) { + exchange.getResponseHeaders().add("Content-Encoding", "gzip"); + } + + exchange.sendResponseHeaders(200, 0); + + // Encode response if needed + OutputStream out = exchange.getResponseBody(); + if (compress) { + out = new GZIPOutputStream(out); + } + + // Outputs ## + out.write(String.valueOf(contentEncoding).getBytes(StandardCharsets.UTF_8)); + out.write('#'); + out.write((compress ? "gzip" : "null").getBytes(StandardCharsets.UTF_8)); + out.write('#'); + out.write((compressedRequest ? contentLength : "null").getBytes(StandardCharsets.UTF_8)); + out.write('#'); + out.write(bytes); + out.close(); + + exchange.close(); + } + } + + /** + * Read all bytes of an input stream and close it. 
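As a side note on the assertions that follow: the expected `38` is simply the gzipped size of the 18-byte payload "compressing client". A quick standalone check — the constant holds for the JDK's default deflater, which is what the gzip-compressing entity uses under the hood, but treat it as illustrative rather than guaranteed for other deflate implementations:

    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.GZIPOutputStream;

    public class GzipSizeCheck {
        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            try (GZIPOutputStream gzip = new GZIPOutputStream(baos)) {
                gzip.write("compressing client".getBytes(StandardCharsets.UTF_8));
            }
            // 10-byte gzip header + ~20 bytes of deflate output + 8-byte trailer
            System.out.println(baos.size()); // prints 38
        }
    }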
+ */ + private static byte[] readAll(InputStream in) throws IOException { + byte[] buffer = new byte[1024]; + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int len = 0; + while ((len = in.read(buffer)) > 0) { + bos.write(buffer, 0, len); + } + in.close(); + return bos.toByteArray(); + } + + private RestClient createClient(boolean enableCompression, boolean chunkedEnabled) { + InetSocketAddress address = httpServer.getAddress(); + return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + .setCompressionEnabled(enableCompression) + .setChunkedEnabled(chunkedEnabled) + .build(); + } + + public void testCompressingClientWithContentLengthSync() throws Exception { + RestClient restClient = createClient(true, false); + + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("compressing client", ContentType.TEXT_PLAIN)); + + Response response = restClient.performRequest(request); + + HttpEntity entity = response.getEntity(); + String content = new String(readAll(entity.getContent()), StandardCharsets.UTF_8); + // Content-Encoding#Accept-Encoding#Content-Length#Content + Assert.assertEquals("gzip#gzip#38#compressing client", content); + + restClient.close(); + } + + public void testCompressingClientContentLengthAsync() throws Exception { + InetSocketAddress address = httpServer.getAddress(); + RestClient restClient = createClient(true, false); + + Request request = new Request("POST", "/"); + request.setEntity(new StringEntity("compressing client", ContentType.TEXT_PLAIN)); + + FutureResponse futureResponse = new FutureResponse(); + restClient.performRequestAsync(request, futureResponse); + Response response = futureResponse.get(); + + // Server should report it had a compressed request and sent back a compressed response + HttpEntity entity = response.getEntity(); + String content = new String(readAll(entity.getContent()), StandardCharsets.UTF_8); + + // Content-Encoding#Accept-Encoding#Content-Length#Content + Assert.assertEquals("gzip#gzip#38#compressing client", content); + + restClient.close(); + } + + public static class FutureResponse extends CompletableFuture implements ResponseListener { + @Override + public void onSuccess(Response response) { + this.complete(response); + } + + @Override + public void onFailure(Exception exception) { + this.completeExceptionally(exception); + } + } +} diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java index a2aef065c9ae8..e5ce5eb91ad5a 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java @@ -138,6 +138,7 @@ public void createRestClient() { failureListener, NodeSelector.ANY, strictDeprecationMode, + false, false ); } From f16584531cb5593e135c1466962a5effae1d0725 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 13 Jul 2022 16:36:40 -0400 Subject: [PATCH 008/104] Fixing implausibly old time stamp 1970-01-01 00:00:00 by using the timestamp from the Git revision instead of default 0 value (#3883) * Fixing implausibly old time stamp 1970-01-01 00:00:00 by using the timestamp from the Git revision instead of default 0 value Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko * Address code review comments (encapsulating the Git origin date inside build script Signed-off-by: Andriy Redko --- build.gradle | 
41 +++++++++++++++++-- .../gradle/tar/SymbolicLinkPreservingTar.java | 14 +++++-- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/build.gradle b/build.gradle index 73dd3ca9a8157..f56b375883b70 100644 --- a/build.gradle +++ b/build.gradle @@ -28,6 +28,9 @@ * under the License. */ +import java.nio.charset.StandardCharsets; +import java.io.ByteArrayOutputStream; + import com.avast.gradle.dockercompose.tasks.ComposePull import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin @@ -37,11 +40,15 @@ import org.opensearch.gradle.Version import org.opensearch.gradle.VersionProperties import org.opensearch.gradle.info.BuildParams import org.opensearch.gradle.plugin.PluginBuildPlugin +import org.opensearch.gradle.tar.SymbolicLinkPreservingTar import org.gradle.plugins.ide.eclipse.model.AccessRule import org.gradle.plugins.ide.eclipse.model.EclipseJdt import org.gradle.plugins.ide.eclipse.model.SourceFolder +import org.gradle.api.Project; +import org.gradle.process.ExecResult; import org.gradle.util.DistributionLocator import org.gradle.util.GradleVersion + import static org.opensearch.gradle.util.GradleUtils.maybeConfigure plugins { @@ -150,6 +157,30 @@ Map buildMetadataMap = buildMetadataValue.tokenize(';').collectE return [key, value] } + /** + * Using 'git' command line (if available), tries to fetch the commit date of the current revision + * @return commit date of the current revision or 0 if it is not available + */ +long gitRevisionDate = { + // Try to get last commit date as Unix timestamp + try (ByteArrayOutputStream stdout = new ByteArrayOutputStream()) { + ExecResult result = project.exec(spec -> { + spec.setIgnoreExitValue(true); + spec.setStandardOutput(stdout); + spec.commandLine("git", "log", "-1", "--format=%ct"); + }); + + if (result.getExitValue() == 0) { + return Long.parseLong(stdout.toString(StandardCharsets.UTF_8).replaceAll("\\s", "")) * 1000; /* seconds to millis */ + } + } catch (IOException | GradleException | NumberFormatException ex) { + /* fall back to default Unix epoch timestamp */ + } + + return 0; +}() + + // injecting groovy property variables into all projects allprojects { project.ext { @@ -282,11 +313,15 @@ allprojects { } // support for reproducible builds - tasks.withType(AbstractArchiveTask).configureEach { + tasks.withType(AbstractArchiveTask).configureEach { task -> // ignore file timestamps // be consistent in archive file order - preserveFileTimestamps = false - reproducibleFileOrder = true + task.preserveFileTimestamps = false + task.reproducibleFileOrder = true + if (task instanceof SymbolicLinkPreservingTar) { + // Replace file timestamps with latest Git revision date (if available) + task.lastModifiedTimestamp = gitRevisionDate + } } project.afterEvaluate { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java index 7b8e9c9c925ba..1423b52c443d9 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -65,6 +65,11 @@ * This task is necessary because the built-in task {@link org.gradle.api.tasks.bundling.Tar} does not preserve symbolic links. 
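The `gitRevisionDate` closure added to build.gradle above can be verified outside Gradle. A hedged standalone equivalent — the class name and the broad `Exception` catch are simplifications for illustration, and `git` is assumed to be on the PATH:

    import java.nio.charset.StandardCharsets;

    public final class GitRevisionDate {
        // Mirrors the build script: `git log -1 --format=%ct` prints the last
        // commit time as Unix seconds; convert to millis, fall back to 0.
        public static long probe() {
            try {
                Process p = new ProcessBuilder("git", "log", "-1", "--format=%ct").start();
                String out = new String(p.getInputStream().readAllBytes(), StandardCharsets.UTF_8).trim();
                return p.waitFor() == 0 ? Long.parseLong(out) * 1000L : 0L;
            } catch (Exception e) {
                return 0L; // same default as the build script: the Unix epoch
            }
        }

        public static void main(String[] args) {
            System.out.println(probe());
        }
    }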
*/ public class SymbolicLinkPreservingTar extends Tar { + private long lastModifiedTimestamp = 0; + + public void setLastModifiedTimestamp(long lastModifiedTimestamp) { + this.lastModifiedTimestamp = lastModifiedTimestamp; + } @Override protected CopyAction createCopyAction() { @@ -80,7 +85,7 @@ protected CopyAction createCopyAction() { compressor = new SimpleCompressor(); break; } - return new SymbolicLinkPreservingTarCopyAction(getArchiveFile(), compressor, isPreserveFileTimestamps()); + return new SymbolicLinkPreservingTarCopyAction(getArchiveFile(), compressor, isPreserveFileTimestamps(), lastModifiedTimestamp); } private static class SymbolicLinkPreservingTarCopyAction implements CopyAction { @@ -88,15 +93,18 @@ private static class SymbolicLinkPreservingTarCopyAction implements CopyAction { private final Provider tarFile; private final ArchiveOutputStreamFactory compressor; private final boolean isPreserveFileTimestamps; + private final long lastModifiedTimestamp; SymbolicLinkPreservingTarCopyAction( final Provider tarFile, final ArchiveOutputStreamFactory compressor, - final boolean isPreserveFileTimestamps + final boolean isPreserveFileTimestamps, + final long lastModifiedTimestamp ) { this.tarFile = tarFile; this.compressor = compressor; this.isPreserveFileTimestamps = isPreserveFileTimestamps; + this.lastModifiedTimestamp = lastModifiedTimestamp; } @Override @@ -219,7 +227,7 @@ private void handleProcessingException(final FileCopyDetailsInternal details, fi } private long getModTime(final FileCopyDetails details) { - return isPreserveFileTimestamps ? details.getLastModified() : 0; + return isPreserveFileTimestamps ? details.getLastModified() : lastModifiedTimestamp; } } From fde28530b58549bba63434de5fdc0848248a550a Mon Sep 17 00:00:00 2001 From: Felix Yan Date: Thu, 14 Jul 2022 16:01:47 +0300 Subject: [PATCH 009/104] Correct typo: Rutime -> Runtime (#3896) Signed-off-by: Felix Yan --- .../src/main/java/org/opensearch/gradle/info/BuildParams.java | 4 ++-- .../org/opensearch/gradle/info/GlobalBuildInfoPlugin.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/BuildParams.java b/buildSrc/src/main/java/org/opensearch/gradle/info/BuildParams.java index 331794c9c2dd7..a9ccb75569002 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/info/BuildParams.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/info/BuildParams.java @@ -192,8 +192,8 @@ public void setRuntimeJavaHome(File runtimeJavaHome) { BuildParams.runtimeJavaHome = requireNonNull(runtimeJavaHome); } - public void setIsRutimeJavaHomeSet(boolean isRutimeJavaHomeSet) { - BuildParams.isRuntimeJavaHomeSet = isRutimeJavaHomeSet; + public void setIsRuntimeJavaHomeSet(boolean isRuntimeJavaHomeSet) { + BuildParams.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet; } public void setJavaVersions(List javaVersions) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java index ccd82372bb11b..166d8e3269d70 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java @@ -113,7 +113,7 @@ public void apply(Project project) { params.reset(); params.setRuntimeJavaHome(runtimeJavaHome); params.setRuntimeJavaVersion(determineJavaVersion("runtime java.home", runtimeJavaHome, minimumRuntimeVersion)); - 
params.setIsRutimeJavaHomeSet(runtimeJavaHomeOpt.isPresent()); + params.setIsRuntimeJavaHomeSet(runtimeJavaHomeOpt.isPresent()); params.setRuntimeJavaDetails(getJavaInstallation(runtimeJavaHome).getDisplayName()); params.setJavaVersions(getAvailableJavaVersions(minimumCompilerVersion)); params.setMinimumCompilerVersion(minimumCompilerVersion); From 2858eb1a8d1ce742a242243fdafdce1bc27c006d Mon Sep 17 00:00:00 2001 From: Ripolin Date: Thu, 14 Jul 2022 19:15:39 +0200 Subject: [PATCH 010/104] Unable to use Systemd module with tar distribution (#3755) * Fix #3666 Signed-off-by: Florent David * Fix linter issues Signed-off-by: Florent David * Add systemd module in linux and freebsd archives Signed-off-by: Florent David --- distribution/build.gradle | 4 +-- .../org/opensearch/systemd/SystemdPlugin.java | 15 ++-------- .../systemd/SystemdPluginTests.java | 30 +++---------------- 3 files changed, 8 insertions(+), 41 deletions(-) diff --git a/distribution/build.gradle b/distribution/build.gradle index 356aaa269e106..21b7d85a7ef2b 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -217,7 +217,7 @@ ext.restTestExpansions = [ // loop over modules to also setup cross task dependencies and increment our modules counter project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { Project module -> if (module.name == 'systemd') { - // the systemd module is only included in the package distributions + // the systemd module is only included in the package distributions or in linux and freebsd archives return } File licenses = new File(module.projectDir, 'licenses') @@ -367,7 +367,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { if (BuildParams.isSnapshotBuild()) { from(buildExternalTestModulesTaskProvider) } - if (project.path.startsWith(':distribution:packages')) { + if (project.path.startsWith(':distribution:packages') || ['freebsd-x64','linux-x64', 'linux-arm64'].contains(platform)) { from(buildSystemdModuleTaskProvider) } } diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java index 3a2d51950f0be..8abdbbb74ae7f 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.opensearch.Build; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; @@ -68,20 +67,10 @@ final boolean isEnabled() { @SuppressWarnings("unused") public SystemdPlugin() { - this(true, Build.CURRENT.type(), System.getenv("OPENSEARCH_SD_NOTIFY")); + this(System.getenv("OPENSEARCH_SD_NOTIFY")); } - SystemdPlugin(final boolean assertIsPackageDistribution, final Build.Type buildType, final String esSDNotify) { - final boolean isPackageDistribution = buildType == Build.Type.DEB || buildType == Build.Type.RPM; - if (assertIsPackageDistribution) { - // our build is configured to only include this module in the package distributions - assert isPackageDistribution : buildType; - } - if (isPackageDistribution == false) { - logger.debug("disabling sd_notify as the build type [{}] is not a package distribution", buildType); - enabled = false; - return; - } + SystemdPlugin(final String esSDNotify) { 
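        // With the package-distribution check removed, enablement now depends
        // only on the OPENSEARCH_SD_NOTIFY environment variable: unset or
        // "false" leaves the plugin disabled, "true" enables it, and any other
        // value fails fast with a RuntimeException (behavior exercised by the
        // SystemdPluginTests changes later in this patch).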
logger.trace("OPENSEARCH_SD_NOTIFY is set to [{}]", esSDNotify); if (esSDNotify == null) { enabled = false; diff --git a/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java index cc8ee649ab782..63d97a7486f58 100644 --- a/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java @@ -32,7 +32,6 @@ package org.opensearch.systemd; -import org.opensearch.Build; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; @@ -58,13 +57,6 @@ import static org.mockito.Mockito.when; public class SystemdPluginTests extends OpenSearchTestCase { - - private final Build.Type randomPackageBuildType = randomFrom(Build.Type.DEB, Build.Type.RPM); - private final Build.Type randomNonPackageBuildType = randomValueOtherThanMany( - t -> t == Build.Type.DEB || t == Build.Type.RPM, - () -> randomFrom(Build.Type.values()) - ); - final Scheduler.Cancellable extender = mock(Scheduler.Cancellable.class); final ThreadPool threadPool = mock(ThreadPool.class); @@ -74,29 +66,15 @@ public class SystemdPluginTests extends OpenSearchTestCase { .thenReturn(extender); } - public void testIsEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, Boolean.TRUE.toString()); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); - assertTrue(plugin.isEnabled()); - assertNotNull(plugin.extender()); - } - - public void testIsNotPackageDistribution() { - final SystemdPlugin plugin = new SystemdPlugin(false, randomNonPackageBuildType, Boolean.TRUE.toString()); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); - assertFalse(plugin.isEnabled()); - assertNull(plugin.extender()); - } - public void testIsImplicitlyNotEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, null); + final SystemdPlugin plugin = new SystemdPlugin(null); plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); } public void testIsExplicitlyNotEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, Boolean.FALSE.toString()); + final SystemdPlugin plugin = new SystemdPlugin(Boolean.FALSE.toString()); plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); @@ -107,7 +85,7 @@ public void testInvalid() { s -> Boolean.TRUE.toString().equals(s) || Boolean.FALSE.toString().equals(s), () -> randomAlphaOfLength(4) ); - final RuntimeException e = expectThrows(RuntimeException.class, () -> new SystemdPlugin(false, randomPackageBuildType, esSDNotify)); + final RuntimeException e = expectThrows(RuntimeException.class, () -> new SystemdPlugin(esSDNotify)); assertThat(e, hasToString(containsString("OPENSEARCH_SD_NOTIFY set to unexpected value [" + esSDNotify + "]"))); } @@ -174,7 +152,7 @@ private void runTest( final AtomicBoolean invoked = new AtomicBoolean(); final AtomicInteger invokedUnsetEnvironment = new AtomicInteger(); final AtomicReference invokedState = new AtomicReference<>(); - final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, esSDNotify) { + final 
SystemdPlugin plugin = new SystemdPlugin(esSDNotify) { @Override int sd_notify(final int unset_environment, final String state) { From 47e33bf435adfa2a118cd4ec2fe73793377781a3 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 14 Jul 2022 10:31:05 -0700 Subject: [PATCH 011/104] Add release notes for release 1.3.4. (#3894) Signed-off-by: Marc Handalian --- release-notes/opensearch.release-notes-1.3.4.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.4.md diff --git a/release-notes/opensearch.release-notes-1.3.4.md b/release-notes/opensearch.release-notes-1.3.4.md new file mode 100644 index 0000000000000..f0f7d1079d686 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.4.md @@ -0,0 +1,10 @@ +## Version 1.3.4 Release Notes + +### Upgrades +* Upgrade Jackson-databind to 2.13.2 resolving [CVE-2020-36518] ([#3777](https://github.com/opensearch-project/OpenSearch/pull/3777)) +* Upgrade netty to 4.1.79.Final resolving [CVE-2022-24823] ([#3875](https://github.com/opensearch-project/OpenSearch/pull/3875)) +* Upgrade tukaani:xz from 1.9 in plugins/ingest-attachment ([#3802](https://github.com/opensearch-project/OpenSearch/pull/3802)) +* Upgrade Tika (2.1.0), xmlbeans (3.1.0), Apache Commons-IO (2.11.0), and PDFBox (2.0.25) in plugins/ingest-attachment ([#3794](https://github.com/opensearch-project/OpenSearch/pull/3794)) + +### Bug Fixes +* Fix bug where opensearch crashes on closed client connection before search reply. ([#3655](https://github.com/opensearch-project/OpenSearch/pull/3655)) From d4465ce33b0cfd9728c455416aab3b7bcf618496 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 14 Jul 2022 13:37:11 -0400 Subject: [PATCH 012/104] Update to Gradle 7.5 (#3594) Signed-off-by: Andriy Redko --- .../StandaloneRestIntegTestTask.java | 25 ++++++++++++++++++- gradle/wrapper/gradle-wrapper.properties | 4 +-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java index bf17daa6e2e6f..abf5f3c1db198 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -32,6 +32,7 @@ package org.opensearch.gradle.testclusters; import groovy.lang.Closure; + import org.opensearch.gradle.FileSystemOperationsAware; import org.opensearch.gradle.test.Fixture; import org.opensearch.gradle.util.GradleUtils; @@ -46,6 +47,8 @@ import org.gradle.internal.resources.ResourceLock; import org.gradle.internal.resources.SharedResource; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -124,12 +127,32 @@ public List getSharedResources() { int nodeCount = clusters.stream().mapToInt(cluster -> cluster.getNodes().size()).sum(); if (nodeCount > 0) { - locks.add(resource.getResourceLock(Math.min(nodeCount, resource.getMaxUsages()))); + locks.add(getResourceLock(resource, Math.min(nodeCount, resource.getMaxUsages()))); } return Collections.unmodifiableList(locks); } + private ResourceLock getResourceLock(SharedResource resource, int nUsages) { + try { + try { + // The getResourceLock(int) is used by Gradle pre-7.5 + return (ResourceLock) MethodHandles.publicLookup() + .findVirtual(SharedResource.class, 
"getResourceLock", MethodType.methodType(ResourceLock.class, int.class)) + .bindTo(resource) + .invokeExact(nUsages); + } catch (NoSuchMethodException | IllegalAccessException ex) { + // The getResourceLock() is used by Gradle post-7.4 + return (ResourceLock) MethodHandles.publicLookup() + .findVirtual(SharedResource.class, "getResourceLock", MethodType.methodType(ResourceLock.class)) + .bindTo(resource) + .invokeExact(); + } + } catch (Throwable ex) { + throw new IllegalStateException("Unable to find suitable ResourceLock::getResourceLock", ex); + } + } + @Override public Task dependsOn(Object... dependencies) { super.dependsOn(dependencies); diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index a8e09684f1fd3..24c164f0f1e12 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=e6d864e3b5bc05cc62041842b306383fc1fefcec359e70cebb1d470a6094ca82 +distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 From 401086d65906da41b2a64147b4c3beb441515199 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Thu, 14 Jul 2022 11:42:32 -0700 Subject: [PATCH 013/104] Move badges after logo & change ordering of badges (#3902) Signed-off-by: Suraj Singh --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5c5366b209fc9..a7abedeefde8e 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ + + +[![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://forum.opensearch.org/c/opensearch/) +[![Documentation](https://img.shields.io/badge/documentation-reference-blue)](https://opensearch.org/docs/latest/opensearch/index/) [![codecov](https://codecov.io/gh/opensearch-project/OpenSearch/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/OpenSearch) [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) [![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) [![Jenkins gradle check job](https://img.shields.io/jenkins/build?jobUrl=https%3A%2F%2Fbuild.ci.opensearch.org%2Fjob%2Fgradle-check%2F&label=Jenkins%20Gradle%20Check)](https://build.ci.opensearch.org/job/gradle-check/) -[![Documentation](https://img.shields.io/badge/doc-reference-blue)](https://opensearch.org/docs/latest/opensearch/index/) -[![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://forum.opensearch.org/c/opensearch/) - - - [Welcome!](#welcome) - [Project Resources](#project-resources) From c526b0c8dd9dcf8f86dc4417347995c6b2e1ec5d Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Thu, 14 Jul 2022 13:18:36 -0700 Subject: [PATCH 014/104] Don't run EmptyDirTaskTests in a Docker container (#3792) * Don't run EmptyDirTaskTests in a 
Docker container Signed-off-by: Daniel Widdis * Catch possible exceptions Signed-off-by: Daniel Widdis * Fix newlines with spotless Signed-off-by: Daniel Widdis * Add comments explaining test incompatibilities Signed-off-by: Daniel Widdis * IDE incorrectly removed needed imports Signed-off-by: Daniel Widdis * Manual edit missed a duplicate Signed-off-by: Daniel Widdis --- .../opensearch/gradle/EmptyDirTaskTests.java | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java index b4eae42c625ac..a6dc268b90557 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java @@ -33,6 +33,9 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.tools.ant.taskdefs.condition.Os; @@ -64,7 +67,11 @@ public void testCreateEmptyDir() throws Exception { } public void testCreateEmptyDirNoPermissions() throws Exception { - RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + // Test depends on Posix file permissions + RandomizedTest.assumeFalse("Functionality is Unix-like OS specific", Os.isFamily(Os.FAMILY_WINDOWS)); + // Java's Files.setPosixFilePermissions is a NOOP inside a Docker container as + // files are created by default with UID and GID = 0 (root). + RandomizedTest.assumeFalse("Functionality doesn't work in Docker", isRunningInDocker()); Project project = ProjectBuilder.builder().build(); EmptyDirTask emptyDirTask = project.getTasks().create("emptyDirTask", EmptyDirTask.class); @@ -92,4 +99,20 @@ private File getNewNonExistingTempFolderFile(Project project) throws IOException return newEmptyFolder; } + private static boolean isRunningInDocker() { + // Only reliable existing method but may be removed in future + if (new File("/.dockerenv").exists()) { + return true; + } + try { + // Backup 1: look for 'docker' in one of the paths in /proc/1/cgroup + if (Files.lines(Path.of("/proc/1/cgroup")).anyMatch(line -> line.contains("docker"))) { + return true; + } + // Backup 2: look for 'docker' in overlay fs + return Files.lines(Path.of("/proc/1/mounts")).anyMatch(line -> line.startsWith("overlay") && line.contains("docker")); + } catch (InvalidPathException | IOException e) { + return false; + } + } } From e724b2e3234fe1baa4f78044dd87db539e213947 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 14 Jul 2022 18:39:29 -0300 Subject: [PATCH 015/104] Added bwc version 1.3.5 (#3911) Signed-off-by: GitHub Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 38909fe05dc1e..347b88ecc3e4d 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -41,6 +41,7 @@ BWC_VERSION: - "1.3.2" - "1.3.3" - "1.3.4" + - "1.3.5" - "2.0.0" - "2.0.1" - "2.0.2" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index a518f5e0a0b76..d4c2920c8a452 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -88,6 +88,7 
@@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_4 = new Version(1030499, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_5 = new Version(1030599, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); From 0e96a878a5ab1ffec3ee04f15fe976d34c9905dc Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 14 Jul 2022 16:39:15 -0700 Subject: [PATCH 016/104] Deprecate public class names with master terminology (#3871) - Add back the usage of the old names for some classes for backwards compatibility. Those public classes were renamed from "master" to "cluster manager" in PR https://github.com/opensearch-project/OpenSearch/pull/3619 / commit https://github.com/opensearch-project/OpenSearch/commit/a7e113a67a9d8c4dd7298956174ff2f666a2c2b7. - Add a unit test to validate that the backwards compatibility of interface `LocalNodeMasterListener` remains. - Revert the renaming of class `MasterService`, `FakeThreadPoolMasterService`, `BlockMasterServiceOnMaster` and `BusyMasterServiceDisruption`, as well as the instance variable names of class `MasterService`, because I couldn't solve the compatibility problem of the public method `public ClusterManagerService getMasterService()` (https://github.com/opensearch-project/OpenSearch/pull/3871#discussion_r920623073). **List of classes that were renamed in the previous PR:** In this PR, the usages of the old names should all be restored.
`server` directory: interface LocalNodeMasterListener -> LocalNodeClusterManagerListener final class MasterNodeChangePredicate -> ClusterManagerNodeChangePredicate NotMasterException -> NotClusterManagerException NoMasterBlockService -> NoClusterManagerBlockService UnsafeBootstrapMasterCommand -> UnsafeBootstrapClusterManagerCommand MasterNotDiscoveredException -> ClusterManagerNotDiscoveredException RestMasterAction -> RestClusterManagerAction **List of classes that were not renamed:** `server` directory: MasterService `test/framework` directory: FakeThreadPoolMasterService BlockMasterServiceOnMaster BusyMasterServiceDisruption Signed-off-by: Tianli Feng --- .../discovery/ClusterManagerDisruptionIT.java | 4 +- .../DedicatedClusterSnapshotRestoreIT.java | 4 +- .../org/opensearch/OpenSearchException.java | 13 +- .../health/TransportClusterHealthAction.java | 4 +- .../cluster/ClusterStateTaskListener.java | 4 +- .../cluster/LocalNodeMasterListener.java | 44 +++++ .../cluster/MasterNodeChangePredicate.java | 53 +++++++ .../cluster/NotMasterException.java | 57 +++++++ .../cluster/coordination/Coordinator.java | 18 +-- .../cluster/coordination/JoinHelper.java | 17 +- .../coordination/NoMasterBlockService.java | 50 ++++++ .../UnsafeBootstrapMasterCommand.java | 47 ++++++ .../cluster/service/ClusterService.java | 22 +-- ...ManagerService.java => MasterService.java} | 6 +- .../common/settings/ClusterSettings.java | 6 +- .../settings/ConsistentSettingsService.java | 8 +- .../common/util/concurrent/BaseFuture.java | 4 +- .../opensearch/discovery/DiscoveryModule.java | 6 +- .../MasterNotDiscoveredException.java | 63 ++++++++ .../rest/action/cat/RestMasterAction.java | 44 +++++ .../ExceptionSerializationTests.java | 11 +- .../RenamedTimeoutRequestParameterTests.java | 8 + ...rnalClusterInfoServiceSchedulingTests.java | 12 +- .../cluster/coordination/NodeJoinTests.java | 38 ++--- .../service/ClusterApplierServiceTests.java | 38 +++++ ... => MasterServiceRenamedSettingTests.java} | 26 +-- ...viceTests.java => MasterServiceTests.java} | 150 +++++++++--------- .../discovery/DiscoveryModuleTests.java | 8 +- .../snapshots/SnapshotResiliencyTests.java | 19 +-- .../AbstractCoordinatorTestCase.java | 22 +-- ....java => FakeThreadPoolMasterService.java} | 8 +- .../opensearch/test/ClusterServiceUtils.java | 18 +-- ...r.java => BlockMasterServiceOnMaster.java} | 4 +- ....java => BusyMasterServiceDisruption.java} | 4 +- ...
=> FakeThreadPoolMasterServiceTests.java} | 14 +- 35 files changed, 628 insertions(+), 226 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java create mode 100644 server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java create mode 100644 server/src/main/java/org/opensearch/cluster/NotMasterException.java create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java create mode 100644 server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java rename server/src/main/java/org/opensearch/cluster/service/{ClusterManagerService.java => MasterService.java} (99%) create mode 100644 server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java rename server/src/test/java/org/opensearch/cluster/service/{ClusterManagerServiceRenamedSettingTests.java => MasterServiceRenamedSettingTests.java} (68%) rename server/src/test/java/org/opensearch/cluster/service/{ClusterManagerServiceTests.java => MasterServiceTests.java} (87%) rename test/framework/src/main/java/org/opensearch/cluster/service/{FakeThreadPoolClusterManagerService.java => FakeThreadPoolMasterService.java} (96%) rename test/framework/src/main/java/org/opensearch/test/disruption/{BlockClusterManagerServiceOnClusterManager.java => BlockMasterServiceOnMaster.java} (96%) rename test/framework/src/main/java/org/opensearch/test/disruption/{BusyClusterManagerServiceDisruption.java => BusyMasterServiceDisruption.java} (95%) rename test/framework/src/test/java/org/opensearch/cluster/service/{FakeThreadPoolClusterManagerServiceTests.java => FakeThreadPoolMasterServiceTests.java} (92%) diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 6c055c4015ff0..2f19d1e476c94 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -43,7 +43,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.disruption.BlockClusterManagerServiceOnClusterManager; +import org.opensearch.test.disruption.BlockMasterServiceOnMaster; import org.opensearch.test.disruption.IntermittentLongGCDisruption; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -308,7 +308,7 @@ public void testMappingTimeout() throws Exception { .setTransientSettings(Settings.builder().put("indices.mapping.dynamic_timeout", "1ms")) ); - ServiceDisruptionScheme disruption = new BlockClusterManagerServiceOnClusterManager(random()); + ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random()); setDisruptionScheme(disruption); disruption.startDisrupting(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index a4afdc0dba85c..2eca8555e1388 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -94,7 +94,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.TestCustomMetadata; -import org.opensearch.test.disruption.BusyClusterManagerServiceDisruption; +import org.opensearch.test.disruption.BusyMasterServiceDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.transport.MockTransportService; @@ -1157,7 +1157,7 @@ public void testDataNodeRestartWithBusyClusterManagerDuringSnapshot() throws Exc final String dataNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - ServiceDisruptionScheme disruption = new BusyClusterManagerServiceDisruption(random(), Priority.HIGH); + ServiceDisruptionScheme disruption = new BusyMasterServiceDisruption(random(), Priority.HIGH); setDisruptionScheme(disruption); client(internalCluster().getMasterName()).admin() .cluster() diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4c12d031a028f..4ebcd9622ce38 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -33,7 +33,6 @@ package org.opensearch; import org.opensearch.action.support.replication.ReplicationOperation; -import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; @@ -47,7 +46,6 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParseException; import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; @@ -791,8 +789,8 @@ private enum OpenSearchExceptionHandle { UNKNOWN_VERSION_ADDED ), CLUSTER_MANAGER_NOT_DISCOVERED_EXCEPTION( - ClusterManagerNotDiscoveredException.class, - ClusterManagerNotDiscoveredException::new, + org.opensearch.discovery.ClusterManagerNotDiscoveredException.class, + org.opensearch.discovery.ClusterManagerNotDiscoveredException::new, 3, UNKNOWN_VERSION_ADDED ), @@ -1501,7 +1499,12 @@ private enum OpenSearchExceptionHandle { 143, UNKNOWN_VERSION_ADDED ), - NOT_CLUSTER_MANAGER_EXCEPTION(NotClusterManagerException.class, NotClusterManagerException::new, 144, UNKNOWN_VERSION_ADDED), + NOT_CLUSTER_MANAGER_EXCEPTION( + org.opensearch.cluster.NotClusterManagerException.class, + org.opensearch.cluster.NotClusterManagerException::new, + 144, + UNKNOWN_VERSION_ADDED + ), STATUS_EXCEPTION( org.opensearch.OpenSearchStatusException.class, org.opensearch.OpenSearchStatusException::new, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 8a2ad77ac1693..3e3d373ff3b31 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -226,8 +226,8 @@ public void onNoLongerMaster(String source) { "stopped being 
cluster-manager while waiting for events with priority [{}]. retrying.", request.waitForEvents() ); - // TransportClusterManagerNodeAction implements the retry logic, which is triggered by passing a - // NotClusterManagerException + // TransportClusterManagerNodeAction implements the retry logic, + // which is triggered by passing a NotClusterManagerException listener.onFailure(new NotClusterManagerException("no longer cluster-manager. source: [" + source + "]")); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 6a12e0cd5ed46..74cc118892579 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -31,7 +31,7 @@ package org.opensearch.cluster; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import java.util.List; @@ -49,7 +49,7 @@ public interface ClusterStateTaskListener { /** * called when the task was rejected because the local node is no longer cluster-manager. - * Used only for tasks submitted to {@link ClusterManagerService}. + * Used only for tasks submitted to {@link MasterService}. */ default void onNoLongerMaster(String source) { onFailure(source, new NotClusterManagerException("no longer cluster-manager. source: [" + source + "]")); diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java new file mode 100644 index 0000000000000..eebfb60d8472d --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster; + +/** + * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local + * node cease being a cluster-manager). 
+ * + * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link LocalNodeClusterManagerListener} + */ +@Deprecated +public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { + +} diff --git a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java new file mode 100644 index 0000000000000..d06aa219e3ca6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster; + +import java.util.function.Predicate; + +/** + * Utility class to build a predicate that accepts cluster state changes + * + * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerNodeChangePredicate} + */ +@Deprecated +public final class MasterNodeChangePredicate { + + private MasterNodeChangePredicate() { + + } + + public static Predicate build(ClusterState currentState) { + return ClusterManagerNodeChangePredicate.build(currentState); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/NotMasterException.java b/server/src/main/java/org/opensearch/cluster/NotMasterException.java new file mode 100644 index 0000000000000..1dbf13212d2dd --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/NotMasterException.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster; + +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Thrown when a node join request or a cluster-manager ping reaches a node which is not + * currently acting as a cluster-manager or when a cluster state update task is to be executed + * on a node that is no longer cluster-manager. + * + * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link NotClusterManagerException} + */ +@Deprecated +public class NotMasterException extends NotClusterManagerException { + + public NotMasterException(String msg) { + super(msg); + } + + public NotMasterException(StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index ee71307e82ec7..56d585d81e9dc 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -58,7 +58,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -141,7 +141,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final boolean singleNodeDiscovery; private final ElectionStrategy electionStrategy; private final TransportService transportService; - private final ClusterManagerService clusterManagerService; + private final MasterService masterService; private final AllocationService allocationService; private final JoinHelper joinHelper; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; @@ -191,7 +191,7 @@ public Coordinator( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, - ClusterManagerService clusterManagerService, + MasterService masterService, Supplier persistedStateSupplier, SeedHostsProvider seedHostsProvider, ClusterApplier clusterApplier, @@ -203,7 +203,7 @@ public Coordinator( ) { this.settings = settings; this.transportService = transportService; - this.clusterManagerService = clusterManagerService; + this.masterService = masterService; this.allocationService = allocationService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); @@ -211,7 +211,7 @@ public Coordinator( this.joinHelper = new JoinHelper( settings, allocationService, - clusterManagerService, + masterService, transportService, this::getCurrentTerm, this::getStateForClusterManagerService, @@ -260,7 +260,7 @@ public Coordinator( ); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); this.clusterApplier = clusterApplier; - clusterManagerService.setClusterStateSupplier(this::getStateForClusterManagerService); + 
masterService.setClusterStateSupplier(this::getStateForClusterManagerService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); this.clusterBootstrapService = new ClusterBootstrapService( settings, @@ -310,7 +310,7 @@ private void onLeaderFailure(Exception e) { private void removeNode(DiscoveryNode discoveryNode, String reason) { synchronized (mutex) { if (mode == Mode.LEADER) { - clusterManagerService.submitStateUpdateTask( + masterService.submitStateUpdateTask( "node-left", new NodeRemovalClusterStateTaskExecutor.Task(discoveryNode, reason), ClusterStateTaskConfig.build(Priority.IMMEDIATE), @@ -756,7 +756,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { } private void cleanClusterManagerService() { - clusterManagerService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { + masterService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { @Override public void onFailure(String source, Exception e) { // ignore @@ -1126,7 +1126,7 @@ private void scheduleReconfigurationIfNeeded() { final ClusterState state = getLastAcceptedState(); if (improveConfiguration(state) != state && reconfigurationTaskScheduled.compareAndSet(false, true)) { logger.trace("scheduling reconfiguration"); - clusterManagerService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { + masterService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { reconfigurationTaskScheduled.set(false); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index aeff856bef51a..3accc4f1d5baf 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -46,7 +46,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.Priority; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.StreamInput; @@ -106,7 +106,7 @@ public class JoinHelper { Setting.Property.Deprecated ); - private final ClusterManagerService clusterManagerService; + private final MasterService masterService; private final TransportService transportService; private volatile JoinTaskExecutor joinTaskExecutor; @@ -122,7 +122,7 @@ public class JoinHelper { JoinHelper( Settings settings, AllocationService allocationService, - ClusterManagerService clusterManagerService, + MasterService masterService, TransportService transportService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, @@ -132,7 +132,7 @@ public class JoinHelper { RerouteService rerouteService, NodeHealthService nodeHealthService ) { - this.clusterManagerService = clusterManagerService; + this.masterService = masterService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); @@ -143,9 +143,8 @@ public class JoinHelper { @Override public ClusterTasksResult execute(ClusterState currentState, List joiningTasks) throws Exception { - // The current 
state that ClusterManagerService uses might have been updated by a (different) cluster-manager in a higher - // term - // already + // The current state that ClusterManagerService uses might have been updated by a (different) cluster-manager + // in a higher term already // Stop processing the current cluster state update, as there's no point in continuing to compute it as // it will later be rejected by Coordinator.publish(...) anyhow if (currentState.term() > term) { @@ -455,7 +454,7 @@ class LeaderJoinAccumulator implements JoinAccumulator { public void handleJoinRequest(DiscoveryNode sender, JoinCallback joinCallback) { final JoinTaskExecutor.Task task = new JoinTaskExecutor.Task(sender, "join existing leader"); assert joinTaskExecutor != null; - clusterManagerService.submitStateUpdateTask( + masterService.submitStateUpdateTask( "node-join", task, ClusterStateTaskConfig.build(Priority.URGENT), @@ -540,7 +539,7 @@ public void close(Mode newMode) { pendingAsTasks.put(JoinTaskExecutor.newBecomeMasterTask(), (source, e) -> {}); pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {}); joinTaskExecutor = joinTaskExecutorGenerator.get(); - clusterManagerService.submitStateUpdateTasks( + masterService.submitStateUpdateTasks( stateUpdateSource, pendingAsTasks, ClusterStateTaskConfig.build(Priority.URGENT), diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java new file mode 100644 index 0000000000000..1dff7b2a8f0d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Service to block the master node + * + * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link NoClusterManagerBlockService} + */ +@Deprecated +public class NoMasterBlockService extends NoClusterManagerBlockService { + + public NoMasterBlockService(Settings settings, ClusterSettings clusterSettings) { + super(settings, clusterSettings); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java new file mode 100644 index 0000000000000..6014dc0b44ab8 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.cluster.coordination; + +/** + * Tool to run an unsafe bootstrap + * + * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link UnsafeBootstrapClusterManagerCommand} + */ +@Deprecated +public class UnsafeBootstrapMasterCommand extends UnsafeBootstrapClusterManagerCommand { + + UnsafeBootstrapMasterCommand() { + super(); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 7fd8ed62def07..baf453d18b3b3 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -62,7 +62,7 @@ * @opensearch.internal */ public class ClusterService extends AbstractLifecycleComponent { - private final ClusterManagerService clusterManagerService; + private final MasterService masterService; private final ClusterApplierService clusterApplierService; @@ -92,7 +92,7 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread this( settings, clusterSettings, - new ClusterManagerService(settings, clusterSettings, threadPool), + new MasterService(settings, clusterSettings, threadPool), new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool) ); } @@ -100,12 +100,12 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread public ClusterService( Settings settings, ClusterSettings clusterSettings, - ClusterManagerService clusterManagerService, + MasterService masterService, ClusterApplierService clusterApplierService ) { this.settings = settings; this.nodeName = Node.NODE_NAME_SETTING.get(settings); - this.clusterManagerService = clusterManagerService; + this.masterService = masterService; this.operationRouting = new OperationRouting(settings, clusterSettings); this.clusterSettings = clusterSettings; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -131,18 +131,18 @@ public RerouteService getRerouteService() { @Override protected synchronized void doStart() { clusterApplierService.start(); - clusterManagerService.start(); + masterService.start(); } @Override protected synchronized void doStop() { - clusterManagerService.stop(); + masterService.stop(); clusterApplierService.stop(); } @Override protected synchronized void doClose() { - clusterManagerService.close(); + masterService.close(); clusterApplierService.close(); } @@ -218,8 +218,8 @@ public void addLocalNodeMasterListener(LocalNodeClusterManagerListener listener) clusterApplierService.addLocalNodeMasterListener(listener); } - public ClusterManagerService getMasterService() { - return clusterManagerService; + public MasterService getMasterService() { + return masterService; } /** @@ -242,7 +242,7 @@ public ClusterApplierService getClusterApplierService() { public static boolean assertClusterOrMasterStateThread() { assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) - || Thread.currentThread().getName().contains(ClusterManagerService.MASTER_UPDATE_THREAD_NAME) + || Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread"; return true; } @@ -333,6 +333,6 @@ public void submitStateUpdateTasks( final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor ) { - 
clusterManagerService.submitStateUpdateTasks(source, tasks, config, executor); + masterService.submitStateUpdateTasks(source, tasks, config, executor); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java similarity index 99% rename from server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java rename to server/src/main/java/org/opensearch/cluster/service/MasterService.java index df691c1443a56..2d52887858372 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -88,8 +88,8 @@ * * @opensearch.internal */ -public class ClusterManagerService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(ClusterManagerService.class); +public class MasterService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(MasterService.class); public static final Setting MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( "cluster.service.slow_master_task_logging_threshold", @@ -122,7 +122,7 @@ public class ClusterManagerService extends AbstractLifecycleComponent { private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor; private volatile Batcher taskBatcher; - public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); this.slowTaskLoggingThreshold = CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 4451d11a7b976..f3d2ab0859513 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -81,7 +81,7 @@ import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.logging.Loggers; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkService; @@ -334,8 +334,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndexModule.NODE_STORE_ALLOW_MMAP, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, - ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java 
b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index 062d62956d377..3be1c4b080b5f 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.LocalNodeClusterManagerListener; +import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -88,10 +88,10 @@ public ConsistentSettingsService(Settings settings, ClusterService clusterServic } /** - * Returns a {@link LocalNodeClusterManagerListener} that will publish hashes of all the settings passed in the constructor. These hashes are + * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are * published by the cluster-manager node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. */ - public LocalNodeClusterManagerListener newHashPublisher() { + public LocalNodeMasterListener newHashPublisher() { // eagerly compute hashes to be published final Map computedHashesOfConsistentSettings = computeHashesOfConsistentSecureSettings(); return new HashesPublisher(computedHashesOfConsistentSettings, clusterService); @@ -246,7 +246,7 @@ private byte[] computeSaltedPBKDF2Hash(byte[] bytes, byte[] salt) { } } - static final class HashesPublisher implements LocalNodeClusterManagerListener { + static final class HashesPublisher implements LocalNodeMasterListener { // eagerly compute hashes to be published final Map computedHashesOfConsistentSettings; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java index e4f8e1a221a0d..fef37299b349d 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java @@ -33,7 +33,7 @@ package org.opensearch.common.util.concurrent; import org.opensearch.cluster.service.ClusterApplierService; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.Nullable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transports; @@ -109,7 +109,7 @@ protected boolean blockingAllowed() { return Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) && ClusterApplierService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON) - && ClusterManagerService.assertNotMasterUpdateThread(BLOCKING_OP_REASON); + && MasterService.assertNotMasterUpdateThread(BLOCKING_OP_REASON); } @Override diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 44f44fa055b2b..6b746e5963bdc 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; import 
org.opensearch.cluster.service.ClusterApplier; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.Randomness; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; @@ -121,7 +121,7 @@ public DiscoveryModule( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, - ClusterManagerService clusterManagerService, + MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, @@ -197,7 +197,7 @@ public DiscoveryModule( transportService, namedWriteableRegistry, allocationService, - clusterManagerService, + masterService, gatewayMetaState::getPersistedState, seedHostsProvider, clusterApplier, diff --git a/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java b/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java new file mode 100644 index 0000000000000..7ed21f924b27c --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.discovery; + +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Exception when the cluster-manager is not discovered + * + * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerNotDiscoveredException} + */ +@Deprecated +public class MasterNotDiscoveredException extends ClusterManagerNotDiscoveredException { + + public MasterNotDiscoveredException() { + super(); + } + + public MasterNotDiscoveredException(Throwable cause) { + super(cause); + } + + public MasterNotDiscoveredException(String message) { + super(message); + } + + public MasterNotDiscoveredException(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java new file mode 100644 index 0000000000000..20f7b01ef2b42 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.rest.action.cat; + +/** + * _cat API action to list cluster_manager information + * + * @opensearch.api + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link RestClusterManagerAction} + */ +@Deprecated +public class RestMasterAction extends RestClusterManagerAction { + +} diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 2af68bbedb456..c0d30a92e2d4d 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -44,7 +44,7 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.client.AbstractClientHeadersTestCase; -import org.opensearch.cluster.NotClusterManagerException; +import org.opensearch.cluster.NotMasterException; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.CoordinationStateRejectedException; @@ -70,7 +70,7 @@ import org.opensearch.common.util.CancellableThreadsTests; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentLocation; -import org.opensearch.discovery.ClusterManagerNotDiscoveredException; +import org.opensearch.discovery.MasterNotDiscoveredException; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.Index; import org.opensearch.index.engine.RecoveryEngineException; @@ -237,6 +237,9 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOEx Files.walkFileTree(testStartPath, visitor); assertTrue(notRegistered.remove(TestException.class)); assertTrue(notRegistered.remove(UnknownHeaderException.class)); + // Remove the deprecated exception classes from the unregistered list. 
+ assertTrue(notRegistered.remove(NotMasterException.class)); + assertTrue(notRegistered.remove(MasterNotDiscoveredException.class)); assertTrue("Classes subclassing OpenSearchException must be registered \n" + notRegistered.toString(), notRegistered.isEmpty()); assertTrue(registered.removeAll(OpenSearchException.getRegisteredKeys())); // check assertEquals(registered.toString(), 0, registered.size()); @@ -697,7 +700,7 @@ public void testIds() { ids.put(0, org.opensearch.index.snapshots.IndexShardSnapshotFailedException.class); ids.put(1, org.opensearch.search.dfs.DfsPhaseExecutionException.class); ids.put(2, org.opensearch.common.util.CancellableThreads.ExecutionCancelledException.class); - ids.put(3, ClusterManagerNotDiscoveredException.class); + ids.put(3, org.opensearch.discovery.ClusterManagerNotDiscoveredException.class); ids.put(4, org.opensearch.OpenSearchSecurityException.class); ids.put(5, org.opensearch.index.snapshots.IndexShardRestoreException.class); ids.put(6, org.opensearch.indices.IndexClosedException.class); @@ -835,7 +838,7 @@ public void testIds() { ids.put(141, org.opensearch.index.query.QueryShardException.class); ids.put(142, ShardStateAction.NoLongerPrimaryShardException.class); ids.put(143, org.opensearch.script.ScriptException.class); - ids.put(144, NotClusterManagerException.class); + ids.put(144, org.opensearch.cluster.NotClusterManagerException.class); ids.put(145, org.opensearch.OpenSearchStatusException.class); ids.put(146, org.opensearch.tasks.TaskCancelledException.class); ids.put(147, org.opensearch.env.ShardLockObtainFailedException.class); diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 976ce5cfee63e..b1ebc02842d25 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -67,6 +67,7 @@ import org.opensearch.rest.action.admin.cluster.RestGetStoredScriptAction; import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.cat.RestAllocationAction; +import org.opensearch.rest.action.cat.RestMasterAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; import org.opensearch.rest.action.cat.RestClusterManagerAction; @@ -167,6 +168,13 @@ public void testCatClusterManager() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); } + public void testCatMaster() { + RestMasterAction action = new RestMasterAction(); + Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); + assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); + assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); + } + public void testCatNodeattrs() { RestNodeAttrsAction action = new RestNodeAttrsAction(); Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 6005ca9150488..7f09f1eb8e2be 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -48,8 +48,8 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; @@ -87,14 +87,14 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { } }; - final ClusterManagerService clusterManagerService = new FakeThreadPoolClusterManagerService( + final MasterService masterService = new FakeThreadPoolMasterService( "test", "clusterManagerService", threadPool, r -> { fail("cluster-manager service should not run any tasks"); } ); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); + final ClusterService clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); final FakeClusterInfoServiceClient client = new FakeClusterInfoServiceClient(threadPool); final InternalClusterInfoService clusterInfoService = new InternalClusterInfoService(settings, clusterService, threadPool, client); @@ -103,8 +103,8 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService()); clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build()); - clusterManagerService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); - clusterManagerService.setClusterStateSupplier(clusterApplierService::state); + masterService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); + masterService.setClusterStateSupplier(clusterApplierService::state); clusterService.start(); final AtomicBoolean becameClusterManager1 = new AtomicBoolean(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 3606d24222679..806468ed5e761 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -43,9 +43,9 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; -import org.opensearch.cluster.service.ClusterManagerService; -import org.opensearch.cluster.service.ClusterManagerServiceTests; +import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.MasterServiceTests; import org.opensearch.common.Randomness; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -98,7 +98,7 @@ public class NodeJoinTests extends 
OpenSearchTestCase { private static ThreadPool threadPool; - private ClusterManagerService clusterManagerService; + private MasterService masterService; private Coordinator coordinator; private DeterministicTaskQueue deterministicTaskQueue; private Transport transport; @@ -117,7 +117,7 @@ public static void afterClass() { @After public void tearDown() throws Exception { super.tearDown(); - clusterManagerService.close(); + masterService.close(); } private static ClusterState initialState(DiscoveryNode localNode, long term, long version, VotingConfiguration config) { @@ -144,7 +144,7 @@ private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterStat random() ); final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool(); - FakeThreadPoolClusterManagerService fakeClusterManagerService = new FakeThreadPoolClusterManagerService( + FakeThreadPoolMasterService fakeClusterManagerService = new FakeThreadPoolMasterService( "test_node", "test", fakeThreadPool, @@ -166,40 +166,40 @@ private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterStat } private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterState initialState) { - ClusterManagerService clusterManagerService = new ClusterManagerService( + MasterService masterService = new MasterService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); AtomicReference clusterStateRef = new AtomicReference<>(initialState); - clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { + masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); setupClusterManagerServiceAndCoordinator( term, initialState, - clusterManagerService, + masterService, threadPool, new Random(Randomness.get().nextLong()), () -> new StatusInfo(HEALTHY, "healthy-info") ); - clusterManagerService.setClusterStateSupplier(clusterStateRef::get); - clusterManagerService.start(); + masterService.setClusterStateSupplier(clusterStateRef::get); + masterService.start(); } private void setupClusterManagerServiceAndCoordinator( long term, ClusterState initialState, - ClusterManagerService clusterManagerService, + MasterService masterService, ThreadPool threadPool, Random random, NodeHealthService nodeHealthService ) { - if (this.clusterManagerService != null || coordinator != null) { + if (this.masterService != null || coordinator != null) { throw new IllegalStateException("method setupClusterManagerServiceAndCoordinator can only be called once"); } - this.clusterManagerService = clusterManagerService; + this.masterService = masterService; CapturingTransport capturingTransport = new CapturingTransport() { @Override protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) { @@ -231,7 +231,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req transportService, writableRegistry(), OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY), - clusterManagerService, + masterService, () -> new InMemoryPersistedState(term, initialState), r -> emptyList(), new NoOpClusterApplier(), @@ -514,7 +514,7 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ); assertTrue( - ClusterManagerServiceTests.discoveryState(clusterManagerService) + 
MasterServiceTests.discoveryState(masterService)
                .getVotingConfigExclusions()
                .stream()
                .anyMatch(
@@ -746,7 +746,7 @@ public void testConcurrentJoining() {
                 throw new RuntimeException(e);
             }

-        assertTrue(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster());
+        assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster());
         for (DiscoveryNode successfulNode : successfulNodes) {
             assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode));
             assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode));
@@ -776,10 +776,10 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() {
     }

     private boolean isLocalNodeElectedMaster() {
-        return ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster();
+        return MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster();
     }

     private boolean clusterStateHasNode(DiscoveryNode node) {
-        return node.equals(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId()));
+        return node.equals(MasterServiceTests.discoveryState(masterService).nodes().get(node.getId()));
     }
 }
diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
index 166fb7a935009..a603dac9f42ca 100644
--- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
@@ -39,6 +39,7 @@
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateObserver;
 import org.opensearch.cluster.LocalNodeClusterManagerListener;
+import org.opensearch.cluster.LocalNodeMasterListener;
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.cluster.coordination.NoClusterManagerBlockService;
 import org.opensearch.cluster.metadata.Metadata;
@@ -331,6 +332,43 @@ public void offMaster() {
         timedClusterApplierService.close();
     }

+    /* Validate the backwards compatibility of LocalNodeMasterListener remains
+     * after making it a subclass of LocalNodeClusterManagerListener.
+     * Overriding the methods with non-inclusive words is intentional.
+     * To support inclusive language, LocalNodeMasterListener is deprecated in 2.2.
+ */ + public void testDeprecatedLocalNodeMasterListenerCallbacks() { + TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false); + + AtomicBoolean isClusterManager = new AtomicBoolean(); + timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() { + @Override + public void onMaster() { + isClusterManager.set(true); + } + + @Override + public void offMaster() { + isClusterManager.set(false); + } + }); + + ClusterState state = timedClusterApplierService.state(); + DiscoveryNodes nodes = state.nodes(); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); + state = ClusterState.builder(state).nodes(nodesBuilder).build(); + setState(timedClusterApplierService, state); + assertThat(isClusterManager.get(), is(true)); + + nodes = state.nodes(); + nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null); + state = ClusterState.builder(state).nodes(nodesBuilder).build(); + setState(timedClusterApplierService, state); + assertThat(isClusterManager.get(), is(false)); + + timedClusterApplierService.close(); + } + public void testClusterStateApplierCantSampleClusterState() throws InterruptedException { AtomicReference error = new AtomicReference<>(); AtomicBoolean applierCalled = new AtomicBoolean(); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceRenamedSettingTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java similarity index 68% rename from server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceRenamedSettingTests.java rename to server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java index e71d872c87527..acf089dc43b56 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceRenamedSettingTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java @@ -22,7 +22,7 @@ * after it is deprecated, so that the backwards compatibility is maintained. * The test can be removed along with removing support of the deprecated setting. */ -public class ClusterManagerServiceRenamedSettingTests extends OpenSearchTestCase { +public class MasterServiceRenamedSettingTests extends OpenSearchTestCase { /** * Validate the both settings are known and supported. 
@@ -33,8 +33,8 @@ public void testClusterManagerServiceSettingsExist() { "Both 'cluster.service.slow_cluster_manager_task_logging_threshold' and its predecessor should be supported built-in settings", settings.containsAll( Arrays.asList( - ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING + MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING ) ) ); @@ -45,8 +45,8 @@ public void testClusterManagerServiceSettingsExist() { */ public void testSettingFallback() { assertEquals( - ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY), - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY) + MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY), + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY) ); } @@ -57,11 +57,11 @@ public void testSettingGetValue() { Settings settings = Settings.builder().put("cluster.service.slow_cluster_manager_task_logging_threshold", "9s").build(); assertEquals( TimeValue.timeValueSeconds(9), - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); assertEquals( - ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getDefault(Settings.EMPTY), - ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getDefault(Settings.EMPTY), + MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); } @@ -73,10 +73,10 @@ public void testSettingGetValueWithFallback() { Settings settings = Settings.builder().put("cluster.service.slow_master_task_logging_threshold", "8s").build(); assertEquals( TimeValue.timeValueSeconds(8), - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); - assertSettingDeprecationsAndWarnings(new Setting[] { ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); + assertSettingDeprecationsAndWarnings(new Setting[] { MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); } /** @@ -89,11 +89,11 @@ public void testSettingGetValueWhenBothAreConfigured() { .build(); assertEquals( TimeValue.timeValueSeconds(9), - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) ); - assertEquals(TimeValue.timeValueSeconds(8), ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings)); - assertSettingDeprecationsAndWarnings(new Setting[] { ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); + assertEquals(TimeValue.timeValueSeconds(8), MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings)); + assertSettingDeprecationsAndWarnings(new Setting[] { MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); } } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java 
b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java similarity index 87% rename from server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java rename to server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 7c7a725032b52..c829bef576be5 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -93,14 +93,14 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; -public class ClusterManagerServiceTests extends OpenSearchTestCase { +public class MasterServiceTests extends OpenSearchTestCase { private static ThreadPool threadPool; private static long relativeTimeInMillis; @BeforeClass public static void createThreadPool() { - threadPool = new TestThreadPool(ClusterManagerServiceTests.class.getName()) { + threadPool = new TestThreadPool(MasterServiceTests.class.getName()) { @Override public long relativeTimeInMillis() { return relativeTimeInMillis; @@ -121,17 +121,17 @@ public void randomizeCurrentTime() { relativeTimeInMillis = randomLongBetween(0L, 1L << 62); } - private ClusterManagerService createClusterManagerService(boolean makeClusterManager) { + private MasterService createClusterManagerService(boolean makeClusterManager) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - final ClusterManagerService clusterManagerService = new ClusterManagerService( + final MasterService masterService = new MasterService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes( DiscoveryNodes.builder() .add(localNode) @@ -141,17 +141,17 @@ private ClusterManagerService createClusterManagerService(boolean makeClusterMan .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { + masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - clusterManagerService.setClusterStateSupplier(clusterStateRef::get); - clusterManagerService.start(); - return clusterManagerService; + masterService.setClusterStateSupplier(clusterStateRef::get); + masterService.start(); + return masterService; } public void testClusterManagerAwareExecution() throws Exception { - final ClusterManagerService nonClusterManager = createClusterManagerService(false); + final MasterService nonClusterManager = createClusterManagerService(false); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); @@ -194,7 +194,7 @@ public void onFailure(String source, Exception e) { } public void testThreadContext() throws InterruptedException { - final ClusterManagerService 
clusterManager = createClusterManagerService(true); + final MasterService masterService = createClusterManagerService(true); final CountDownLatch latch = new CountDownLatch(1); try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { @@ -208,7 +208,7 @@ public void testThreadContext() throws InterruptedException { final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); final TimeValue clusterManagerTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); - clusterManager.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + masterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { assertTrue(threadPool.getThreadContext().isSystemContext()); @@ -280,7 +280,7 @@ public void onAckTimeout() { latch.await(); - clusterManager.close(); + masterService.close(); } /* @@ -292,8 +292,8 @@ public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws Interru final CountDownLatch latch = new CountDownLatch(1); AtomicBoolean published = new AtomicBoolean(); - try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { - clusterManagerService.submitStateUpdateTask( + try (MasterService masterService = createClusterManagerService(true)) { + masterService.submitStateUpdateTask( "testClusterStateTaskListenerThrowingExceptionIsOkay", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -328,11 +328,11 @@ public void onFailure(String source, Exception e) {} @TestLogging(value = "org.opensearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLogging() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 start", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test1]" ) @@ -340,7 +340,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 computation", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "took [1s] to compute cluster state update for [test1]" ) @@ -348,7 +348,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 notification", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "took [0s] to notify listeners on unchanged cluster state for [test1]" ) @@ -357,7 +357,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 start", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test2]" ) @@ -365,7 +365,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 failure", - 
ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.TRACE, "failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*" ) @@ -373,7 +373,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 computation", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "took [2s] to compute cluster state update for [test2]" ) @@ -381,7 +381,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 notification", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "took [0s] to notify listeners on unchanged cluster state for [test2]" ) @@ -390,7 +390,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 start", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test3]" ) @@ -398,7 +398,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 computation", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "took [3s] to compute cluster state update for [test3]" ) @@ -406,7 +406,7 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 notification", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]" ) @@ -415,14 +415,14 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "executing cluster state update for [test4]" ) ); - try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { - clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + try (MasterService masterService = createClusterManagerService(true)) { + masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis(); @@ -437,7 +437,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis(); @@ -452,7 +452,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) {} }); - clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis(); 
@@ -469,7 +469,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -617,7 +617,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }; - try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { + try (MasterService masterService = createClusterManagerService(true)) { final ConcurrentMap submittedTasksPerThread = new ConcurrentHashMap<>(); CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) { @@ -632,7 +632,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size()); final TaskExecutor executor = assignment.v1(); if (tasks.size() == 1) { - clusterManagerService.submitStateUpdateTask( + masterService.submitStateUpdateTask( threadName, tasks.stream().findFirst().get(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -642,7 +642,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } else { Map taskListeners = new HashMap<>(); tasks.forEach(t -> taskListeners.put(t, listener)); - clusterManagerService.submitStateUpdateTasks( + masterService.submitStateUpdateTasks( threadName, taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -696,8 +696,8 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted final CountDownLatch latch = new CountDownLatch(1); final AtomicReference assertionRef = new AtomicReference<>(); - try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { - clusterManagerService.submitStateUpdateTask( + try (MasterService masterService = createClusterManagerService(true)) { + masterService.submitStateUpdateTask( "testBlockingCallInClusterStateTaskListenerFails", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -737,11 +737,11 @@ public void onFailure(String source, Exception e) {} @TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test1 shouldn't log because it was fast enough", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*took*test1*" ) @@ -749,7 +749,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*took [*], which is over [10s], to compute cluster state update for [test2]" ) @@ -757,7 +757,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3", - ClusterManagerService.class.getCanonicalName(), + 
MasterService.class.getCanonicalName(), Level.WARN, "*took [*], which is over [10s], to compute cluster state update for [test3]" ) @@ -765,7 +765,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*took [*], which is over [10s], to compute cluster state update for [test4]" ) @@ -773,7 +773,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test5 should not log despite publishing slowly", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*took*test5*" ) @@ -781,16 +781,16 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test6 should log due to slow and failing publication", - ClusterManagerService.class.getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [test6]:*" ) ); try ( - ClusterManagerService clusterManagerService = new ClusterManagerService( + MasterService masterService = new MasterService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -805,21 +805,19 @@ public void testLongClusterStateUpdateLogging() throws Exception { emptySet(), Version.CURRENT ); - final ClusterState initialClusterState = ClusterState.builder( - new ClusterName(ClusterManagerServiceTests.class.getSimpleName()) - ) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { + masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { if (event.source().contains("test5")) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); } if (event.source().contains("test6")) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new OpenSearchException("simulated error during slow publication which should trigger logging"); @@ -827,17 +825,17 @@ public void testLongClusterStateUpdateLogging() throws Exception { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - clusterManagerService.setClusterStateSupplier(clusterStateRef::get); - clusterManagerService.start(); + 
masterService.setClusterStateSupplier(clusterStateRef::get); + masterService.start(); final CountDownLatch latch = new CountDownLatch(6); final CountDownLatch processedFirstTask = new CountDownLatch(1); - clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += randomLongBetween( 0L, - ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() + MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() ); return currentState; } @@ -855,10 +853,10 @@ public void onFailure(String source, Exception e) { }); processedFirstTask.await(); - clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); @@ -874,10 +872,10 @@ public void onFailure(String source, Exception e) { latch.countDown(); } }); - clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return ClusterState.builder(currentState).incrementVersion().build(); @@ -893,10 +891,10 @@ public void onFailure(String source, Exception e) { fail(); } }); - clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return currentState; @@ -912,7 +910,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - clusterManagerService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -928,7 +926,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - clusterManagerService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -946,7 +944,7 @@ public void onFailure(String 
source, Exception e) { }); // Additional update task to make sure all previous logging made it to the loggerName // We don't check logging for this on since there is no guarantee that it will occur before our check - clusterManagerService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -973,9 +971,9 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); try ( - ClusterManagerService clusterManagerService = new ClusterManagerService( + MasterService masterService = new MasterService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -983,14 +981,14 @@ public void testAcking() throws InterruptedException { ) ) { - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference publisherRef = new AtomicReference<>(); - clusterManagerService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); - clusterManagerService.setClusterStateSupplier(() -> initialClusterState); - clusterManagerService.start(); + masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); + masterService.setClusterStateSupplier(() -> initialClusterState); + masterService.start(); // check that we don't time out before even committing the cluster state { @@ -1002,7 +1000,7 @@ public void testAcking() throws InterruptedException { ) ); - clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1057,7 +1055,7 @@ public void onAckTimeout() { ackListener.onNodeAck(node3, null); }); - clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1103,8 +1101,8 @@ public void onAckTimeout() { /** * Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer) */ - public static ClusterState discoveryState(ClusterManagerService clusterManagerService) { - return clusterManagerService.state(); + public static ClusterState discoveryState(MasterService masterService) { + return masterService.state(); } } diff --git 
a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index 5588e9c1ceba8..d1e3f406b4933 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -37,7 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.service.ClusterApplier; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; @@ -70,7 +70,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private TransportService transportService; private NamedWriteableRegistry namedWriteableRegistry; - private ClusterManagerService clusterManagerService; + private MasterService masterService; private ClusterApplier clusterApplier; private ThreadPool threadPool; private ClusterSettings clusterSettings; @@ -93,7 +93,7 @@ public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - clusterManagerService = mock(ClusterManagerService.class); + masterService = mock(MasterService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -112,7 +112,7 @@ private DiscoveryModule newModule(Settings settings, List plugi transportService, namedWriteableRegistry, null, - clusterManagerService, + masterService, clusterApplier, clusterSettings, plugins, diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 6faaa6973953c..7e0f977e6e835 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -144,8 +144,8 @@ import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.NamedWriteableRegistry; @@ -1643,7 +1643,7 @@ private final class TestClusterNode { private final DiscoveryNode node; - private final ClusterManagerService clusterManagerService; + private final MasterService masterService; private final AllocationService allocationService; @@ -1663,18 +1663,13 @@ private final class TestClusterNode { this.node = node; final Environment environment = createEnvironment(node.getName()); threadPool = deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)); - clusterManagerService 
= new FakeThreadPoolClusterManagerService( - node.getName(), - "test", - threadPool, - deterministicTaskQueue::scheduleNow - ); + masterService = new FakeThreadPoolMasterService(node.getName(), "test", threadPool, deterministicTaskQueue::scheduleNow); final Settings settings = environment.settings(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = new ClusterService( settings, clusterSettings, - clusterManagerService, + masterService, new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { @@ -2205,7 +2200,7 @@ public void start(ClusterState initialState) { transportService, namedWriteableRegistry, allocationService, - clusterManagerService, + masterService, () -> persistedState, hostsResolver -> nodes.values() .stream() @@ -2219,7 +2214,7 @@ public void start(ClusterState initialState) { ElectionStrategy.DEFAULT_INSTANCE, () -> new StatusInfo(HEALTHY, "healthy-info") ); - clusterManagerService.setClusterStatePublisher(coordinator); + masterService.setClusterStatePublisher(coordinator); coordinator.start(); clusterService.getClusterApplierService().setNodeConnectionsService(nodeConnectionsService); nodeConnectionsService.start(); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 0e0b466408f77..19551b0adecb2 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -54,7 +54,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; +import org.opensearch.cluster.service.FakeThreadPoolMasterService; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; import org.opensearch.common.UUIDs; @@ -557,7 +557,7 @@ void stabilise(long stabilisationDurationMillis) { final ClusterNode leader = getAnyLeader(); final long leaderTerm = leader.coordinator.getCurrentTerm(); - final int pendingTaskCount = leader.clusterManagerService.getFakeMasterServicePendingTaskCount(); + final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount(); runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); final Matcher isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); @@ -1025,7 +1025,7 @@ class ClusterNode { private final DiscoveryNode localNode; final MockPersistedState persistedState; final Settings nodeSettings; - private AckedFakeThreadPoolClusterManagerService clusterManagerService; + private AckedFakeThreadPoolMasterService masterService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; TransportService transportService; @@ -1105,7 +1105,7 @@ protected Optional getDisruptableMockTransport(Transpo null, emptySet() ); - clusterManagerService = new AckedFakeThreadPoolClusterManagerService( + masterService = new AckedFakeThreadPoolMasterService( localNode.getId(), "test", threadPool, @@ -1119,7 +1119,7 @@ protected 
Optional getDisruptableMockTransport(Transpo deterministicTaskQueue, threadPool ); - clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); + clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); clusterService.setNodeConnectionsService( new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) ); @@ -1134,7 +1134,7 @@ protected Optional getDisruptableMockTransport(Transpo transportService, writableRegistry(), allocationService, - clusterManagerService, + masterService, this::getPersistedState, Cluster.this::provideSeedHosts, clusterApplierService, @@ -1144,7 +1144,7 @@ protected Optional getDisruptableMockTransport(Transpo getElectionStrategy(), nodeHealthService ); - clusterManagerService.setClusterStatePublisher(coordinator); + masterService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( settings, allocationService, @@ -1331,11 +1331,11 @@ AckCollector submitUpdateTask( onNode(() -> { logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source); final long submittedTerm = coordinator.getCurrentTerm(); - clusterManagerService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { assertThat(currentState.term(), greaterThanOrEqualTo(submittedTerm)); - clusterManagerService.nextAckCollector = ackCollector; + masterService.nextAckCollector = ackCollector; return clusterStateUpdate.apply(currentState); } @@ -1510,11 +1510,11 @@ int getSuccessfulAckIndex(ClusterNode clusterNode) { } } - static class AckedFakeThreadPoolClusterManagerService extends FakeThreadPoolClusterManagerService { + static class AckedFakeThreadPoolMasterService extends FakeThreadPoolMasterService { AckCollector nextAckCollector = new AckCollector(); - AckedFakeThreadPoolClusterManagerService( + AckedFakeThreadPoolMasterService( String nodeName, String serviceName, ThreadPool threadPool, diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java similarity index 96% rename from test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java rename to test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java index 46893fe08bd76..c532a0cc36472 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java @@ -55,8 +55,8 @@ import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomInt; -public class FakeThreadPoolClusterManagerService extends ClusterManagerService { - private static final Logger logger = LogManager.getLogger(FakeThreadPoolClusterManagerService.class); +public class FakeThreadPoolMasterService extends MasterService { + private static final Logger logger = LogManager.getLogger(FakeThreadPoolMasterService.class); private final String name; private final List pendingTasks = new ArrayList<>(); @@ -65,7 +65,7 @@ public class FakeThreadPoolClusterManagerService extends ClusterManagerService { private boolean taskInProgress = false; private 
boolean waitForPublish = false; - public FakeThreadPoolClusterManagerService( + public FakeThreadPoolMasterService( String nodeName, String serviceName, ThreadPool threadPool, @@ -137,7 +137,7 @@ public void run() { if (waitForPublish == false) { taskInProgress = false; } - FakeThreadPoolClusterManagerService.this.scheduleNextTaskIfNecessary(); + FakeThreadPoolMasterService.this.scheduleNextTaskIfNecessary(); } }); } diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index 93c437d3cf91b..99f9b86fb6479 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -47,7 +47,7 @@ import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.MasterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; @@ -61,23 +61,23 @@ public class ClusterServiceUtils { - public static ClusterManagerService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { - ClusterManagerService clusterManagerService = new ClusterManagerService( + public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { + MasterService masterService = new MasterService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { + masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - clusterManagerService.setClusterStateSupplier(clusterStateRef::get); - clusterManagerService.start(); - return clusterManagerService; + masterService.setClusterStateSupplier(clusterStateRef::get); + masterService.start(); + return masterService; } - public static ClusterManagerService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { + public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) @@ -114,7 +114,7 @@ public void onFailure(String source, Exception e) { } } - public static void setState(ClusterManagerService executor, ClusterState clusterState) { + public static void setState(MasterService executor, ClusterState clusterState) { CountDownLatch latch = new CountDownLatch(1); executor.submitStateUpdateTask("test setting state", new ClusterStateUpdateTask() { @Override diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java similarity index 96% rename from 
test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java rename to test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java index 8edb508625341..85f8e5c250066 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java @@ -43,11 +43,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -public class BlockClusterManagerServiceOnClusterManager extends SingleNodeDisruption { +public class BlockMasterServiceOnMaster extends SingleNodeDisruption { AtomicReference disruptionLatch = new AtomicReference<>(); - public BlockClusterManagerServiceOnClusterManager(Random random) { + public BlockMasterServiceOnMaster(Random random) { super(random); } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java similarity index 95% rename from test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java rename to test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java index 55eb90a4dc89d..4830f9b0359fb 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java @@ -41,11 +41,11 @@ import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; -public class BusyClusterManagerServiceDisruption extends SingleNodeDisruption { +public class BusyMasterServiceDisruption extends SingleNodeDisruption { private final AtomicBoolean active = new AtomicBoolean(); private final Priority priority; - public BusyClusterManagerServiceDisruption(Random random, Priority priority) { + public BusyMasterServiceDisruption(Random random, Priority priority) { super(random); this.priority = priority; } diff --git a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java similarity index 92% rename from test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java rename to test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java index f09e62fa574e6..b4cdb0b33391a 100644 --- a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java +++ b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java @@ -61,7 +61,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class FakeThreadPoolClusterManagerServiceTests extends OpenSearchTestCase { +public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase { public void testFakeClusterManagerService() { List runnableTasks = new ArrayList<>(); @@ -84,21 +84,21 @@ public void testFakeClusterManagerService() { doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any()); when(mockThreadPool.generic()).thenReturn(executorService); - FakeThreadPoolClusterManagerService 
clusterManagerService = new FakeThreadPoolClusterManagerService( + FakeThreadPoolMasterService masterService = new FakeThreadPoolMasterService( "test_node", "test", mockThreadPool, runnableTasks::add ); - clusterManagerService.setClusterStateSupplier(lastClusterStateRef::get); - clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { + masterService.setClusterStateSupplier(lastClusterStateRef::get); + masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { lastClusterStateRef.set(event.state()); publishingCallback.set(publishListener); }); - clusterManagerService.start(); + masterService.start(); AtomicBoolean firstTaskCompleted = new AtomicBoolean(); - clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState) @@ -138,7 +138,7 @@ public void onFailure(String source, Exception e) { assertThat(runnableTasks.size(), equalTo(0)); AtomicBoolean secondTaskCompleted = new AtomicBoolean(); - clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState) From afcd4f73c36d48f4cfd8c444ec406399ee6e2456 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Jul 2022 10:18:46 -0700 Subject: [PATCH 017/104] Bump com.gradle.enterprise from 3.10.2 to 3.10.3 (#3932) Bumps com.gradle.enterprise from 3.10.2 to 3.10.3. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 settings.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/settings.gradle b/settings.gradle
index 14b591ea622f3..65dc6a13100e2 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -10,7 +10,7 @@
  */

 plugins {
-  id "com.gradle.enterprise" version "3.10.2"
+  id "com.gradle.enterprise" version "3.10.3"
 }

 buildCache {

From cc10b97b955f7e6a5063c099f97d12ebbf6b5ac4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?I=C3=B1aki=20Villar?=
Date: Mon, 18 Jul 2022 14:09:48 -0700
Subject: [PATCH 018/104] Build performance improvements (#3926)

* set appropriate jdk boolean for buildNoJdkRpm

Signed-off-by: Inaki Villar

* make missingJavadoc cacheable

Signed-off-by: Inaki Villar

* using relativePath for javadoc.options.linksOffline

Signed-off-by: Inaki Villar
---
 build.gradle                       | 5 +++--
 distribution/packages/build.gradle | 2 +-
 gradle/missing-javadoc.gradle      | 6 ++++--
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/build.gradle b/build.gradle
index f56b375883b70..ceb9ce395c3c7 100644
--- a/build.gradle
+++ b/build.gradle
@@ -320,7 +320,7 @@ allprojects {
       task.reproducibleFileOrder = true
       if (task instanceof SymbolicLinkPreservingTar) {
         // Replace file timestamps with latest Git revision date (if available)
-        task.lastModifiedTimestamp = gitRevisionDate 
+        task.lastModifiedTimestamp = gitRevisionDate
       }
     }

@@ -359,7 +359,8 @@ allprojects {
           project.javadoc.dependsOn "${upstreamProject.path}:javadoc"
           String externalLinkName = upstreamProject.archivesBaseName
           String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + externalLinkName.replaceAll('\\.', '/') + '/' + dep.version
-          project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${upstreamProject.buildDir}/docs/javadoc/"
+          String projectRelativePath = project.relativePath(upstreamProject.buildDir)
+          project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${projectRelativePath}/docs/javadoc/"
         }
       }
       boolean hasShadow = project.plugins.hasPlugin(ShadowPlugin)
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index cd0cf6b9db64d..df3049d7684c4 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -392,7 +392,7 @@ tasks.register('buildRpm', Rpm) {
 }

 tasks.register('buildNoJdkRpm', Rpm) {
-  configure(commonRpmConfig(true, 'x64'))
+  configure(commonRpmConfig(false, 'x64'))
 }

 Closure dpkgExists = { it -> new File('/bin/dpkg-deb').exists() || new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() }
diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle
index d927bc8a2d09a..6b3dacd3e905a 100644
--- a/gradle/missing-javadoc.gradle
+++ b/gradle/missing-javadoc.gradle
@@ -70,7 +70,7 @@ allprojects {
       classpath = sourceSets.main.compileClasspath
       srcDirSet = sourceSets.main.java

-      outputDir = project.javadoc.destinationDir
+      outputDir = file("${project.buildDir}/tmp/missingJavadoc/")
     }
   }
 }
@@ -183,6 +183,7 @@ configure(project(":server")) {
   }
 }

+@CacheableTask
 class MissingJavadocTask extends DefaultTask {
   @InputFiles
   @SkipWhenEmpty
@@ -227,7 +228,8 @@ class MissingJavadocTask extends DefaultTask {
   @Input
   def executable

-  @Input
+  @InputFiles
+  @PathSensitive(PathSensitivity.RELATIVE)
   def taskResources

   /** Utility method to recursively collect all tasks with same name like this one that we depend on */

From 10208524f56e3ca8c97b871509b4bb1e0431f442 Mon Sep
17 00:00:00 2001 From: Suraj Singh Date: Mon, 18 Jul 2022 14:14:13 -0700 Subject: [PATCH 019/104] PR coverage requirement and default settings (#3931) Signed-off-by: Suraj Singh --- codecov.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 codecov.yml diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000000..7c38e4e63325b --- /dev/null +++ b/codecov.yml @@ -0,0 +1,12 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + status: + project: + default: + target: 70% # the required coverage value + threshold: 1% # the leniency in hitting the target From 07775ff69d27b8b58b804bfd93a8d4511c355788 Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Mon, 18 Jul 2022 14:31:26 -0700 Subject: [PATCH 020/104] Adding missing Java docs for new Translog implementation (#3936) Signed-off-by: Sarat Vemulapalli --- .../index/translog/InternalTranslogManager.java | 12 ++++++------ .../opensearch/index/translog/TranslogManager.java | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index c7fdf1e30e6a1..ac82cf246cc55 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -285,9 +285,9 @@ public void ensureCanFlush() { /** * Reads operations from the translog - * @param location + * @param location location of translog * @return the translog operation - * @throws IOException + * @throws IOException throws an IO exception */ @Override public Translog.Operation readOperation(Translog.Location location) throws IOException { @@ -296,9 +296,9 @@ public Translog.Operation readOperation(Translog.Location location) throws IOExc /** * Adds an operation to the translog - * @param operation + * @param operation operation to add to translog * @return the location in the translog - * @throws IOException + * @throws IOException throws an IO exception */ @Override public Translog.Location add(Translog.Operation operation) throws IOException { @@ -396,8 +396,8 @@ public String getTranslogUUID() { /** * - * @param localCheckpointOfLastCommit - * @param flushThreshold + * @param localCheckpointOfLastCommit local checkpoint reference of last commit to translog + * @param flushThreshold threshold to flush to translog * @return if the translog should be flushed */ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index f82434f40b06c..5353fa3b59124 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -98,15 +98,15 @@ public interface TranslogManager { * Reads operations for the translog * @param location the location in the translog * @return the translog operation - * @throws IOException + * @throws IOException throws an IO exception when reading the file fails */ Translog.Operation readOperation(Translog.Location location) throws IOException; /** * Adds an operation to the translog - * @param operation + * @param operation to add to translog * @return the location in the translog - * @throws IOException + * @throws 
IOException throws an IO exception if adding an operation fails */ Translog.Location add(Translog.Operation operation) throws IOException; From 6d7a1521b47ab9dcf7b03491e6e305d4ac1dd4c4 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Mon, 18 Jul 2022 15:55:27 -0700 Subject: [PATCH 021/104] Fail build on wildcard imports (#3939) Signed-off-by: Owais Kazi --- .../NanoTimeVsCurrentTimeMillisBenchmark.java | 10 ++++++++- gradle/formatting.gradle | 7 ++++++ .../painless/antlr/PainlessLexer.java | 12 +++++++--- .../painless/antlr/PainlessParser.java | 22 +++++++++++++++---- .../search/profile/query/QueryProfilerIT.java | 12 ++++++++-- .../index/store/RemoteDirectoryTests.java | 5 ++++- .../index/store/RemoteIndexInputTests.java | 9 +++++++- .../index/store/RemoteIndexOutputTests.java | 4 +++- .../listener/TranslogListenerTests.java | 6 ++++- .../indices/ShardLimitValidatorTests.java | 4 +++- .../SegmentFileTransferHandlerTests.java | 12 +++++++++- .../SegmentReplicationSourceServiceTests.java | 3 ++- .../SegmentReplicationTargetServiceTests.java | 8 ++++++- .../PublishCheckpointActionTests.java | 5 ++++- 14 files changed, 100 insertions(+), 19 deletions(-) diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java index 4e4662a8f0427..4c87734255b8a 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java @@ -8,7 +8,15 @@ package org.opensearch.benchmark.time; -import org.openjdk.jmh.annotations.*; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Benchmark; import java.util.concurrent.TimeUnit; diff --git a/gradle/formatting.gradle b/gradle/formatting.gradle index 19adea33b23b3..7cd8c35b5fef7 100644 --- a/gradle/formatting.gradle +++ b/gradle/formatting.gradle @@ -69,6 +69,13 @@ allprojects { eclipse().configFile rootProject.file('buildSrc/formatterConfig.xml') trimTrailingWhitespace() + custom 'Refuse wildcard imports', { + // Wildcard imports can't be resolved; fail the build + if (it =~ /\s+import .*\*;/) { + throw new AssertionError("Do not use wildcard imports. 'spotlessApply' cannot resolve this issue.") + } + } + // See DEVELOPER_GUIDE.md for details of when to enable this. 
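// paddedCell() below is, to our understanding, Spotless' convergence workaround for formatter steps that do not produce stable output; it stays opt-in via the 'spotless.paddedcell' system property.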
if (System.getProperty('spotless.paddedcell') != null) { paddedCell() diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java index 60c6f9763bd95..f17a51d4c28f6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java @@ -35,10 +35,16 @@ import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; + import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.*; @SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast" }) abstract class PainlessLexer extends Lexer { diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java index 03cf58f336d10..1a083750aa36b 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java @@ -32,11 +32,25 @@ */ package org.opensearch.painless.antlr; -import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.misc.*; -import org.antlr.v4.runtime.tree.*; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; import java.util.List; @SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast" }) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index a74f359f2542e..8601e2b6d6be9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -34,7 +34,11 @@ import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.*; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.MultiSearchResponse; +import org.opensearch.action.search.SearchType; +import 
org.opensearch.action.search.SearchRequestBuilder; +import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -44,7 +48,11 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; -import java.util.*; +import java.util.List; +import java.util.Arrays; +import java.util.Map; +import java.util.HashSet; +import java.util.Set; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.greaterThan; diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index 76d4d50022042..2ded77d2cecfd 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -26,7 +26,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.doThrow; public class RemoteDirectoryTests extends OpenSearchTestCase { private BlobContainer blobContainer; diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java index e7fe2f9798453..273d3c7e37c56 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -14,7 +14,14 @@ import java.io.IOException; import java.io.InputStream; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.doThrow; public class RemoteIndexInputTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java index 64975f2ac4892..e7eb3231bf87d 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java @@ -18,7 +18,9 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.doThrow; public class RemoteIndexOutputTests extends OpenSearchTestCase { private static final String FILENAME = "segment_1"; diff --git a/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java b/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java index 062801fc43d2f..1f28e32a6dbec 100644 --- a/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java @@ -13,7 +13,11 @@ import org.opensearch.test.OpenSearchTestCase; import java.lang.reflect.Proxy; -import java.util.*; +import 
java.util.List; +import java.util.Collections; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedList; import java.util.concurrent.atomic.AtomicInteger; public class TranslogListenerTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index a61ca13df0215..60c467e3864e9 100644 --- a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -53,7 +53,9 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; -import static org.opensearch.cluster.metadata.IndexMetadata.*; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java index 5fd8bc1e74625..e13b4ed31fc39 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java @@ -31,7 +31,17 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.doNothing; public class SegmentFileTransferHandlerTests extends IndexShardTestCase { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index 8d2ca9ff63f3d..4bfdd81d50a1e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -33,7 +33,8 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SegmentReplicationSourceServiceTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 33734fe85def5..ef67bd665dedf 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java 
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -25,7 +25,13 @@ import java.io.IOException; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.spy; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 77cc1d744f0dc..23fad53dd1201 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -33,7 +33,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; import static org.opensearch.test.ClusterServiceUtils.createClusterService; public class PublishCheckpointActionTests extends OpenSearchTestCase { From 55eb86df0dc11dbfd11452fb837f89ab96b537be Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Tue, 19 Jul 2022 18:08:29 +0530 Subject: [PATCH 022/104] Create pit service layer changes (#3921) * Create pit service layer changes Signed-off-by: Bharathwaj G --- .../search/searchafter/SearchAfterIT.java | 58 ++ .../search/slice/SearchSliceIT.java | 86 ++- .../org/opensearch/action/ActionModule.java | 5 + .../action/search/CreatePitAction.java | 23 + .../action/search/CreatePitController.java | 255 +++++++++ .../action/search/CreatePitRequest.java | 195 +++++++ .../action/search/CreatePitResponse.java | 232 ++++++++ .../search/PitSearchContextIdForNode.java | 50 ++ .../action/search/SearchContextId.java | 2 +- .../action/search/SearchContextIdForNode.java | 2 +- .../action/search/SearchTransportService.java | 74 +++ .../opensearch/action/search/SearchUtils.java | 44 ++ .../search/TransportCreatePitAction.java | 133 +++++ .../action/search/TransportSearchAction.java | 76 +++ .../search/UpdatePitContextRequest.java | 67 +++ .../search/UpdatePitContextResponse.java | 58 ++ .../java/org/opensearch/client/Client.java | 7 + .../client/support/AbstractClient.java | 8 + .../common/settings/ClusterSettings.java | 4 + .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexSettings.java | 29 +- .../index/shard/SearchOperationListener.java | 44 ++ .../search/DefaultSearchContext.java | 20 +- .../org/opensearch/search/SearchService.java | 160 +++++- .../search/internal/PitReaderContext.java | 70 +++ .../search/internal/ReaderContext.java | 15 +- .../search/CreatePitControllerTests.java | 460 ++++++++++++++++ .../action/search/PitTestsUtil.java | 84 +++ .../search/CreatePitMultiNodeTests.java | 202 +++++++ .../search/CreatePitSingleNodeTests.java | 501 ++++++++++++++++++ .../search/DefaultSearchContextTests.java | 56 +- .../opensearch/search/SearchServiceTests.java | 100 +++- 32 files changed, 3102 
insertions(+), 19 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitController.java create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java create mode 100644 server/src/main/java/org/opensearch/action/search/SearchUtils.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java create mode 100644 server/src/main/java/org/opensearch/search/internal/PitReaderContext.java create mode 100644 server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java create mode 100644 server/src/test/java/org/opensearch/action/search/PitTestsUtil.java create mode 100644 server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java create mode 100644 server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 926e21294ffc8..f33543e1114cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -32,15 +32,21 @@ package org.opensearch.search.searchafter; +import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matchers; @@ -155,6 +161,58 @@ public void testsShouldFail() throws Exception { } } + public void testPitWithSearchAfter() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); + ensureGreen(); + indexRandom( + true, + client().prepareIndex("test").setId("0").setSource("field1", 0), + client().prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto"), + client().prepareIndex("test").setId("2").setSource("field1", 101), + client().prepareIndex("test").setId("3").setSource("field1", 99) + ); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "test" }); + ActionFuture execute = 
client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchResponse sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 99 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(2, sr.getHits().getHits().length); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 100 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(1, sr.getHits().getHits().length); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 0 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(3, sr.getHits().getHits().length); + /** + * Add new data and assert PIT results remain the same and normal search results gets refreshed + */ + indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 0 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(3, sr.getHits().getHits().length); + sr = client().prepareSearch().addSort("field1", SortOrder.ASC).setQuery(matchAllQuery()).searchAfter(new Object[] { 0 }).get(); + assertEquals(4, sr.getHits().getHits().length); + client().admin().indices().prepareDelete("test").get(); + } + public void testWithNullStrings() throws InterruptedException { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field2", "type=keyword").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index 9c735c42052e3..eacbcc42a8157 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -32,9 +32,13 @@ package org.opensearch.search.slice; +import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; @@ -46,6 +50,7 @@ import org.opensearch.search.Scroll; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.test.OpenSearchIntegTestCase; @@ -86,7 +91,12 @@ private void setupIndex(int numDocs, int numberOfShards) throws IOException, Exe client().admin() .indices() .prepareCreate("test") - .setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000)) + .setSettings( + Settings.builder() + .put("number_of_shards", numberOfShards) + .put("index.max_slices_per_scroll", 10000) + .put("index.max_slices_per_pit", 10000) + ) .setMapping(mapping) ); ensureGreen(); @@ -129,6 +139,78 @@ public void 
testSearchSort() throws Exception { } } + public void testSearchSortWithoutPitOrScroll() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + SliceBuilder sliceBuilder = new SliceBuilder("_id", 0, 4); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> request.slice(sliceBuilder).get()); + assertTrue(ex.getMessage().contains("all shards failed")); + } + + public void testSearchSortWithPIT() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int max = randomIntBetween(2, numShards * 3); + CreatePitRequest pitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + pitRequest.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, pitRequest); + CreatePitResponse pitResponse = execute.get(); + for (String field : new String[] { "_id", "random_int", "static_int" }) { + int fetchSize = randomIntBetween(10, 100); + + // test _doc sort + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithPIT(request, field, max, numDocs); + + // test numeric sort + request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("random_int")); + assertSearchSlicesWithPIT(request, field, max, numDocs); + } + client().admin().indices().prepareDelete("test").get(); + } + + private void assertSearchSlicesWithPIT(SearchRequestBuilder request, String field, int numSlice, int numDocs) { + int totalResults = 0; + List keys = new ArrayList<>(); + for (int id = 0; id < numSlice; id++) { + SliceBuilder sliceBuilder = new SliceBuilder(field, id, numSlice); + SearchResponse searchResponse = request.slice(sliceBuilder).setFrom(0).get(); + totalResults += searchResponse.getHits().getHits().length; + int expectedSliceResults = (int) searchResponse.getHits().getTotalHits().value; + int numSliceResults = searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + while (searchResponse.getHits().getHits().length > 0) { + searchResponse = request.setFrom(numSliceResults).slice(sliceBuilder).get(); + totalResults += searchResponse.getHits().getHits().length; + numSliceResults += searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + } + assertThat(numSliceResults, equalTo(expectedSliceResults)); + } + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet(keys).size(), equalTo(numDocs)); + } + public void testWithPreferenceAndRoutings() throws Exception { int numShards = 10; int totalDocs = randomIntBetween(100, 1000); @@ -217,7 +299,7 @@ public void testInvalidQuery() throws Exception { ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(SearchException.class)); - 
assertThat(rootCause.getMessage(), equalTo("`slice` cannot be used outside of a scroll context")); + assertThat(rootCause.getMessage(), equalTo("`slice` cannot be used outside of a scroll context or PIT context")); } private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 2a3c82991d9bb..cf79c97e4ca64 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -232,10 +232,12 @@ import org.opensearch.action.main.MainAction; import org.opensearch.action.main.TransportMainAction; import org.opensearch.action.search.ClearScrollAction; +import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; +import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -657,6 +659,9 @@ public void reg actions.register(DeleteDanglingIndexAction.INSTANCE, TransportDeleteDanglingIndexAction.class); actions.register(FindDanglingIndexAction.INSTANCE, TransportFindDanglingIndexAction.class); + // point in time actions + actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); + return unmodifiableMap(actions.getRegistry()); } diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitAction.java b/server/src/main/java/org/opensearch/action/search/CreatePitAction.java new file mode 100644 index 0000000000000..7ffa30a182458 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for creating PIT reader context + */ +public class CreatePitAction extends ActionType { + public static final CreatePitAction INSTANCE = new CreatePitAction(); + public static final String NAME = "indices:data/read/point_in_time/create"; + + private CreatePitAction() { + super(NAME, CreatePitResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java new file mode 100644 index 0000000000000..40ee810186eb2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -0,0 +1,255 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.tasks.Task; +import org.opensearch.transport.Transport; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; + +/** + * Controller for creating PIT reader context + * Phase 1 of create PIT request : Create PIT reader contexts in the associated shards with a temporary keep alive + * Phase 2 of create PIT : Update PIT reader context with PIT ID and keep alive from request and + * fail user request if any of the updates in this phase are failed - we clean up PITs in case of such failures. + * This two phase approach is used to save PIT ID as part of context which is later used for other use cases like list PIT etc. 
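+ * A minimal illustrative call pattern, using only names defined in this package and an
+ * assumed in-scope {@code Client} instance (a sketch, not a verbatim snippet from the tests):
+ * <pre>{@code
+ * CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true);
+ * request.setIndices(new String[] { "my-index" }); // "my-index" is a placeholder
+ * CreatePitResponse response = client.execute(CreatePitAction.INSTANCE, request).actionGet();
+ * String pitId = response.getId(); // feed into PointInTimeBuilder for subsequent searches
+ * }</pre>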
+ */ +public class CreatePitController { + private final SearchTransportService searchTransportService; + private final ClusterService clusterService; + private final TransportSearchAction transportSearchAction; + private final NamedWriteableRegistry namedWriteableRegistry; + private static final Logger logger = LogManager.getLogger(CreatePitController.class); + public static final Setting PIT_INIT_KEEP_ALIVE = Setting.positiveTimeSetting( + "point_in_time.init.keep_alive", + timeValueSeconds(30), + Setting.Property.NodeScope + ); + + @Inject + public CreatePitController( + SearchTransportService searchTransportService, + ClusterService clusterService, + TransportSearchAction transportSearchAction, + NamedWriteableRegistry namedWriteableRegistry + ) { + this.searchTransportService = searchTransportService; + this.clusterService = clusterService; + this.transportSearchAction = transportSearchAction; + this.namedWriteableRegistry = namedWriteableRegistry; + } + + /** + * This method creates PIT reader context + */ + public void executeCreatePit( + CreatePitRequest request, + Task task, + StepListener createPitListener, + ActionListener updatePitIdListener + ) { + SearchRequest searchRequest = new SearchRequest(request.getIndices()); + searchRequest.preference(request.getPreference()); + searchRequest.routing(request.getRouting()); + searchRequest.indicesOptions(request.getIndicesOptions()); + searchRequest.allowPartialSearchResults(request.shouldAllowPartialPitCreation()); + SearchTask searchTask = searchRequest.createTask( + task.getId(), + task.getType(), + task.getAction(), + task.getParentTaskId(), + Collections.emptyMap() + ); + /** + * Phase 1 of create PIT + */ + executeCreatePit(searchTask, searchRequest, createPitListener); + + /** + * Phase 2 of create PIT where we update pit id in pit contexts + */ + createPitListener.whenComplete( + searchResponse -> { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); }, + updatePitIdListener::onFailure + ); + } + + /** + * Creates PIT reader context with temporary keep alive + */ + void executeCreatePit(Task task, SearchRequest searchRequest, StepListener createPitListener) { + logger.debug( + () -> new ParameterizedMessage("Executing creation of PIT context for indices [{}]", Arrays.toString(searchRequest.indices())) + ); + transportSearchAction.executeRequest( + task, + searchRequest, + TransportCreatePitAction.CREATE_PIT_ACTION, + true, + new TransportSearchAction.SinglePhaseSearchAction() { + @Override + public void executeOnShardTarget( + SearchTask searchTask, + SearchShardTarget target, + Transport.Connection connection, + ActionListener searchPhaseResultActionListener + ) { + searchTransportService.createPitContext( + connection, + new TransportCreatePitAction.CreateReaderContextRequest( + target.getShardId(), + PIT_INIT_KEEP_ALIVE.get(clusterService.getSettings()) + ), + searchTask, + ActionListener.wrap(r -> searchPhaseResultActionListener.onResponse(r), searchPhaseResultActionListener::onFailure) + ); + } + }, + createPitListener + ); + } + + /** + * Updates PIT ID, keep alive and createdTime of PIT reader context + */ + void executeUpdatePitId( + CreatePitRequest request, + SearchRequest searchRequest, + SearchResponse searchResponse, + ActionListener updatePitIdListener + ) { + logger.debug( + () -> new ParameterizedMessage( + "Updating PIT context with PIT ID [{}], creation time and keep alive", + searchResponse.pointInTimeId() + ) + ); + /** + * store the create time ( same create time for all PIT 
contexts across shards ) to be used + * for list PIT api + */ + final long relativeStartNanos = System.nanoTime(); + final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + searchRequest.getOrCreateAbsoluteStartMillis(), + relativeStartNanos, + System::nanoTime + ); + final long creationTime = timeProvider.getAbsoluteStartMillis(); + CreatePitResponse createPITResponse = new CreatePitResponse( + searchResponse.pointInTimeId(), + creationTime, + searchResponse.getTotalShards(), + searchResponse.getSuccessfulShards(), + searchResponse.getSkippedShards(), + searchResponse.getFailedShards(), + searchResponse.getShardFailures() + ); + SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, createPITResponse.getId()); + final StepListener> lookupListener = getConnectionLookupListener(contextId); + lookupListener.whenComplete(nodelookup -> { + final ActionListener groupedActionListener = getGroupedListener( + updatePitIdListener, + createPITResponse, + contextId.shards().size(), + contextId.shards().values() + ); + for (Map.Entry entry : contextId.shards().entrySet()) { + DiscoveryNode node = nodelookup.apply(entry.getValue().getClusterAlias(), entry.getValue().getNode()); + try { + final Transport.Connection connection = searchTransportService.getConnection(entry.getValue().getClusterAlias(), node); + searchTransportService.updatePitContext( + connection, + new UpdatePitContextRequest( + entry.getValue().getSearchContextId(), + createPITResponse.getId(), + request.getKeepAlive().millis(), + creationTime + ), + groupedActionListener + ); + } catch (Exception e) { + logger.error( + () -> new ParameterizedMessage( + "Create pit update phase failed for PIT ID [{}] on node [{}]", + searchResponse.pointInTimeId(), + node + ), + e + ); + groupedActionListener.onFailure( + new OpenSearchException( + "Create pit update phase for PIT ID [" + searchResponse.pointInTimeId() + "] failed on node[" + node + "]", + e + ) + ); + } + } + }, updatePitIdListener::onFailure); + } + + private StepListener> getConnectionLookupListener(SearchContextId contextId) { + ClusterState state = clusterService.state(); + final Set clusters = contextId.shards() + .values() + .stream() + .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) + .map(SearchContextIdForNode::getClusterAlias) + .collect(Collectors.toSet()); + return (StepListener>) SearchUtils.getConnectionLookupListener( + searchTransportService.getRemoteClusterService(), + state, + clusters + ); + } + + private ActionListener getGroupedListener( + ActionListener updatePitIdListener, + CreatePitResponse createPITResponse, + int size, + Collection contexts + ) { + return new GroupedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(final Collection responses) { + updatePitIdListener.onResponse(createPITResponse); + } + + @Override + public void onFailure(final Exception e) { + updatePitIdListener.onFailure(e); + } + }, size); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java new file mode 100644 index 0000000000000..45d6d9e2c9f54 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java @@ -0,0 +1,195 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * A request to make create point in time against one or more indices. + */ +public class CreatePitRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent { + + // keep alive for pit reader context + private TimeValue keepAlive; + + // this describes whether PIT can be created with partial failures + private Boolean allowPartialPitCreation; + @Nullable + private String routing = null; + @Nullable + private String preference = null; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; + + public CreatePitRequest(TimeValue keepAlive, Boolean allowPartialPitCreation, String... indices) { + this.keepAlive = keepAlive; + this.allowPartialPitCreation = allowPartialPitCreation; + this.indices = indices; + } + + public CreatePitRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + routing = in.readOptionalString(); + preference = in.readOptionalString(); + keepAlive = in.readTimeValue(); + allowPartialPitCreation = in.readOptionalBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeOptionalString(routing); + out.writeOptionalString(preference); + out.writeTimeValue(keepAlive); + out.writeOptionalBoolean(allowPartialPitCreation); + } + + public String getRouting() { + return routing; + } + + public String getPreference() { + return preference; + } + + public String[] getIndices() { + return indices; + } + + public IndicesOptions getIndicesOptions() { + return indicesOptions; + } + + public TimeValue getKeepAlive() { + return keepAlive; + } + + /** + * Sets if this request should allow partial results. 
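+     * When set to {@code false}, creation is expected to be all-or-nothing: the flag is
+     * forwarded to {@code SearchRequest#allowPartialSearchResults} by
+     * {@code CreatePitController}, so failing to open a reader context on any target shard
+     * fails the whole request.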
+ */ + public void allowPartialPitCreation(Boolean allowPartialPitCreation) { + this.allowPartialPitCreation = allowPartialPitCreation; + } + + public boolean shouldAllowPartialPitCreation() { + return allowPartialPitCreation; + } + + public void setRouting(String routing) { + this.routing = routing; + } + + public void setPreference(String preference) { + this.preference = preference; + } + + public void setIndices(String[] indices) { + this.indices = indices; + } + + public void setIndicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (keepAlive == null) { + validationException = addValidationError("keep alive not specified", validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public CreatePitRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + return this; + } + + public void setKeepAlive(TimeValue keepAlive) { + this.keepAlive = keepAlive; + } + + public final String buildDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("indices["); + Strings.arrayToDelimitedString(indices, ",", sb); + sb.append("], "); + sb.append("pointintime[").append(keepAlive).append("], "); + sb.append("allowPartialPitCreation[").append(allowPartialPitCreation).append("], "); + return sb.toString(); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new Task(id, type, action, this.buildDescription(), parentTaskId, headers); + } + + private void validateIndices(String... indices) { + Objects.requireNonNull(indices, "indices must not be null"); + for (String index : indices) { + Objects.requireNonNull(index, "index must not be null"); + } + } + + @Override + public CreatePitRequest indices(String... indices) { + validateIndices(indices); + this.indices = indices; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("keep_alive", keepAlive); + builder.field("allow_partial_pit_creation", allowPartialPitCreation); + if (indices != null) { + builder.startArray("indices"); + for (String index : indices) { + builder.value(index); + } + builder.endArray(); + } + if (indicesOptions != null) { + indicesOptions.toXContent(builder, params); + } + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java new file mode 100644 index 0000000000000..25eb9aff9e3d7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java @@ -0,0 +1,232 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; +import org.opensearch.rest.action.RestActions; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Create point in time response with point in time id and shard success / failures + */ +public class CreatePitResponse extends ActionResponse implements StatusToXContentObject { + private static final ParseField ID = new ParseField("id"); + private static final ParseField CREATION_TIME = new ParseField("creation_time"); + + // point in time id + private final String id; + private final int totalShards; + private final int successfulShards; + private final int failedShards; + private final int skippedShards; + private final ShardSearchFailure[] shardFailures; + private final long creationTime; + + public CreatePitResponse(StreamInput in) throws IOException { + super(in); + id = in.readString(); + totalShards = in.readVInt(); + successfulShards = in.readVInt(); + failedShards = in.readVInt(); + skippedShards = in.readVInt(); + creationTime = in.readLong(); + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = ShardSearchFailure.readShardSearchFailure(in); + } + } + } + + public CreatePitResponse( + String id, + long creationTime, + int totalShards, + int successfulShards, + int skippedShards, + int failedShards, + ShardSearchFailure[] shardFailures + ) { + this.id = id; + this.creationTime = creationTime; + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.skippedShards = skippedShards; + this.failedShards = failedShards; + this.shardFailures = shardFailures; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + RestActions.buildBroadcastShardsHeader( + builder, + params, + getTotalShards(), + getSuccessfulShards(), + getSkippedShards(), + getFailedShards(), + getShardFailures() + ); + builder.field(CREATION_TIME.getPreferredName(), creationTime); + builder.endObject(); + return builder; + } + + /** + * Parse the create PIT response body into a new {@link CreatePitResponse} object + */ + public static CreatePitResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + parser.nextToken(); + return innerFromXContent(parser); + } + + public static CreatePitResponse innerFromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + String currentFieldName = parser.currentName(); + int successfulShards = -1; + int totalShards = -1; + int skippedShards = 0; + int failedShards = 0; + String id = null; + long creationTime = 0; + List failures = new ArrayList<>(); + for (XContentParser.Token token = 
parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (CREATION_TIME.match(currentFieldName, parser.getDeprecationHandler())) { + creationTime = parser.longValue(); + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { + id = parser.text(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + failedShards = parser.intValue(); // we don't need it but need to consume it + } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + successfulShards = parser.intValue(); + } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + totalShards = parser.intValue(); + } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + skippedShards = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + failures.add(ShardSearchFailure.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } + } + + return new CreatePitResponse( + id, + creationTime, + totalShards, + successfulShards, + skippedShards, + failedShards, + failures.toArray(ShardSearchFailure.EMPTY_ARRAY) + ); + } + + public long getCreationTime() { + return creationTime; + } + + /** + * The failed number of shards the search was executed on. + */ + public int getFailedShards() { + return shardFailures.length; + } + + /** + * The failures that occurred during the search. + */ + public ShardSearchFailure[] getShardFailures() { + return this.shardFailures; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeVInt(totalShards); + out.writeVInt(successfulShards); + out.writeVInt(failedShards); + out.writeVInt(skippedShards); + out.writeLong(creationTime); + out.writeVInt(shardFailures.length); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + shardSearchFailure.writeTo(out); + } + } + + public String getId() { + return id; + } + + /** + * The total number of shards the create pit operation was executed on. + */ + public int getTotalShards() { + return totalShards; + } + + /** + * The successful number of shards the create pit operation was executed on. 
+ */ + public int getSuccessfulShards() { + return successfulShards; + } + + public int getSkippedShards() { + return skippedShards; + } + + @Override + public RestStatus status() { + return RestStatus.status(successfulShards, totalShards, shardFailures); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java new file mode 100644 index 0000000000000..577a559beb8f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Pit ID along with Id for a search context per node. + * + * @opensearch.internal + */ +public class PitSearchContextIdForNode implements Writeable { + + private final String pitId; + private final SearchContextIdForNode searchContextIdForNode; + + public PitSearchContextIdForNode(String pitId, SearchContextIdForNode searchContextIdForNode) { + this.pitId = pitId; + this.searchContextIdForNode = searchContextIdForNode; + } + + PitSearchContextIdForNode(StreamInput in) throws IOException { + this.pitId = in.readString(); + this.searchContextIdForNode = new SearchContextIdForNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + searchContextIdForNode.writeTo(out); + } + + public String getPitId() { + return pitId; + } + + public SearchContextIdForNode getSearchContextIdForNode() { + return searchContextIdForNode; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextId.java b/server/src/main/java/org/opensearch/action/search/SearchContextId.java index c2bb46a7b0e57..8a9cf1dc9772d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextId.java @@ -116,7 +116,7 @@ public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegist } return new SearchContextId(Collections.unmodifiableMap(shards), Collections.unmodifiableMap(aliasFilters)); } catch (IOException e) { - throw new IllegalArgumentException(e); + throw new IllegalArgumentException("invalid id: [" + id + "]", e); } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index 8f16a6e3ee226..df655c39f845d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public final class SearchContextIdForNode implements Writeable { +final class SearchContextIdForNode implements Writeable { private final String node; private final ShardSearchContextId searchContextId; private final String clusterAlias; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index f91276960397a..e667fdff53312 100644 --- 
a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -95,6 +95,8 @@ public class SearchTransportService { public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]"; + public static final String CREATE_READER_CONTEXT_ACTION_NAME = "indices:data/read/search[create_context]"; + public static final String UPDATE_READER_CONTEXT_ACTION_NAME = "indices:data/read/search[update_context]"; private final TransportService transportService; private final BiFunction responseWrapper; @@ -142,6 +144,36 @@ public void sendFreeContext( ); } + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener actionListener + ) { + transportService.sendRequest( + connection, + UPDATE_READER_CONTEXT_ACTION_NAME, + request, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(actionListener, UpdatePitContextResponse::new) + ); + } + + public void createPitContext( + Transport.Connection connection, + TransportCreatePitAction.CreateReaderContextRequest request, + SearchTask task, + ActionListener actionListener + ) { + transportService.sendChildRequest( + connection, + CREATE_READER_CONTEXT_ACTION_NAME, + request, + task, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(actionListener, TransportCreatePitAction.CreateReaderContextResponse::new) + ); + } + public void sendCanMatch( Transport.Connection connection, final ShardSearchRequest request, @@ -562,6 +594,48 @@ public static void registerRequestHandler(TransportService transportService, Sea } ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, SearchService.CanMatchResponse::new); + transportService.registerRequestHandler( + CREATE_READER_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + TransportCreatePitAction.CreateReaderContextRequest::new, + (request, channel, task) -> { + ChannelActionListener< + TransportCreatePitAction.CreateReaderContextResponse, + TransportCreatePitAction.CreateReaderContextRequest> listener = new ChannelActionListener<>( + channel, + CREATE_READER_CONTEXT_ACTION_NAME, + request + ); + searchService.createPitReaderContext( + request.getShardId(), + request.getKeepAlive(), + ActionListener.wrap( + r -> listener.onResponse(new TransportCreatePitAction.CreateReaderContextResponse(r)), + listener::onFailure + ) + ); + } + ); + TransportActionProxy.registerProxyAction( + transportService, + CREATE_READER_CONTEXT_ACTION_NAME, + TransportCreatePitAction.CreateReaderContextResponse::new + ); + + transportService.registerRequestHandler( + UPDATE_READER_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + UpdatePitContextRequest::new, + (request, channel, task) -> { + ChannelActionListener listener = new ChannelActionListener<>( + channel, + UPDATE_READER_CONTEXT_ACTION_NAME, + request + ); + searchService.updatePitIdAndKeepAlive(request, listener); + } + ); + TransportActionProxy.registerProxyAction(transportService, UPDATE_READER_CONTEXT_ACTION_NAME, UpdatePitContextResponse::new); } /** diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java new file mode 100644 index 
0000000000000..96fcda0d491c9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.transport.RemoteClusterService; + +import java.util.Set; +import java.util.function.BiFunction; + +/** + * Helper class for common search functions + */ +public class SearchUtils { + + public SearchUtils() {} + + /** + * Get connection lookup listener for list of clusters passed + */ + public static ActionListener> getConnectionLookupListener( + RemoteClusterService remoteClusterService, + ClusterState state, + Set clusters + ) { + final StepListener> lookupListener = new StepListener<>(); + + if (clusters.isEmpty()) { + lookupListener.onResponse((cluster, nodeId) -> state.getNodes().get(nodeId)); + } else { + remoteClusterService.collectNodes(clusters, lookupListener); + } + return lookupListener; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java new file mode 100644 index 0000000000000..c6bf610edfb9a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; + +/** + * Transport action for creating PIT reader context + */ +public class TransportCreatePitAction extends HandledTransportAction { + + public static final String CREATE_PIT_ACTION = "create_pit"; + private final TransportService transportService; + private final SearchTransportService searchTransportService; + private final ClusterService clusterService; + private final TransportSearchAction transportSearchAction; + private final NamedWriteableRegistry namedWriteableRegistry; + private final CreatePitController createPitController; + + @Inject + public TransportCreatePitAction( + TransportService transportService, + ActionFilters actionFilters, + SearchTransportService searchTransportService, + ClusterService clusterService, + TransportSearchAction transportSearchAction, + NamedWriteableRegistry namedWriteableRegistry, + CreatePitController createPitController + ) { + super(CreatePitAction.NAME, transportService, actionFilters, in -> new CreatePitRequest(in)); + this.transportService = transportService; + this.searchTransportService = searchTransportService; + this.clusterService = clusterService; + this.transportSearchAction = transportSearchAction; + this.namedWriteableRegistry = namedWriteableRegistry; + this.createPitController = createPitController; + } + + @Override + protected void doExecute(Task task, CreatePitRequest request, ActionListener listener) { + final StepListener createPitListener = new StepListener<>(); + final ActionListener updatePitIdListener = ActionListener.wrap(r -> listener.onResponse(r), e -> { + logger.error( + () -> new ParameterizedMessage( + "PIT creation failed while updating PIT ID for indices [{}]", + Arrays.toString(request.indices()) + ) + ); + listener.onFailure(e); + }); + createPitController.executeCreatePit(request, task, createPitListener, updatePitIdListener); + } + + /** + * Request to create pit reader context with keep alive + */ + public static class CreateReaderContextRequest extends TransportRequest { + private final ShardId shardId; + private final TimeValue keepAlive; + + public CreateReaderContextRequest(ShardId shardId, TimeValue keepAlive) { + this.shardId = shardId; + this.keepAlive = keepAlive; + } + + public ShardId getShardId() { + return shardId; + } + + public TimeValue getKeepAlive() { + return keepAlive; + } + + public CreateReaderContextRequest(StreamInput in) throws IOException { + super(in); + this.shardId = new ShardId(in); + this.keepAlive = in.readTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + 
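// Write order mirrors the StreamInput constructor above: parent fields, then shardId, then keepAlive. +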
super.writeTo(out); + shardId.writeTo(out); + out.writeTimeValue(keepAlive); + } + } + + /** + * Create pit reader context response which holds the contextId + */ + public static class CreateReaderContextResponse extends SearchPhaseResult { + public CreateReaderContextResponse(ShardSearchContextId shardSearchContextId) { + this.contextId = shardSearchContextId; + } + + public CreateReaderContextResponse(StreamInput in) throws IOException { + super(in); + contextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + contextId.writeTo(out); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index ebb0f21d6fe16..1ca477942cdf6 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -65,6 +65,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.index.Index; import org.opensearch.index.query.Rewriteable; @@ -297,6 +298,81 @@ void executeOnShardTarget( ); } + public void executeRequest( + Task task, + SearchRequest searchRequest, + String actionName, + boolean includeSearchContext, + SinglePhaseSearchAction phaseSearchAction, + ActionListener listener + ) { + executeRequest(task, searchRequest, new SearchAsyncActionProvider() { + @Override + public AbstractSearchAsyncAction asyncSearchAction( + SearchTask task, + SearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardsIts, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + SearchResponse.Clusters clusters + ) { + return new AbstractSearchAsyncAction( + actionName, + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + searchRequest, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + searchRequest.getMaxConcurrentShardRequests(), + clusters + ) { + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + phaseSearchAction.executeOnShardTarget(task, shard, connection, listener); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase(getName()) { + @Override + public void run() { + final AtomicArray atomicArray = results.getAtomicArray(); + sendSearchResponse(InternalSearchResponse.empty(), atomicArray); + } + }; + } + + @Override + boolean buildPointInTimeFromSearchResults() { + return includeSearchContext; + } + }; + } + }, listener); + } + private void executeRequest( Task task, SearchRequest searchRequest, diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java 
new file mode 100644 index 0000000000000..e6c9befb7938f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * Request used to update PIT reader contexts with pitId, keepAlive and creationTime + */ +public class UpdatePitContextRequest extends TransportRequest { + private final String pitId; + private final long keepAlive; + + private final long creationTime; + private final ShardSearchContextId searchContextId; + + public UpdatePitContextRequest(ShardSearchContextId searchContextId, String pitId, long keepAlive, long creationTime) { + this.pitId = pitId; + this.searchContextId = searchContextId; + this.keepAlive = keepAlive; + this.creationTime = creationTime; + } + + UpdatePitContextRequest(StreamInput in) throws IOException { + super(in); + pitId = in.readString(); + keepAlive = in.readLong(); + creationTime = in.readLong(); + searchContextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(pitId); + out.writeLong(keepAlive); + out.writeLong(creationTime); + searchContextId.writeTo(out); + } + + public ShardSearchContextId getSearchContextId() { + return searchContextId; + } + + public String getPitId() { + return pitId; + } + + public long getCreationTime() { + return creationTime; + } + + public long getKeepAlive() { + return keepAlive; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java new file mode 100644 index 0000000000000..919dd87ea3041 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +/** + * Update PIT context response with creation time, keep alive etc. 
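+ * Fields are serialized in the order pit id, creation time, keep alive; {@link #writeTo} and the + * stream constructor must stay in sync.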
+ */ +public class UpdatePitContextResponse extends TransportResponse { + private final String pitId; + + private final long creationTime; + + private final long keepAlive; + + UpdatePitContextResponse(StreamInput in) throws IOException { + super(in); + pitId = in.readString(); + creationTime = in.readLong(); + keepAlive = in.readLong(); + } + + public UpdatePitContextResponse(String pitId, long creationTime, long keepAlive) { + this.pitId = pitId; + this.keepAlive = keepAlive; + this.creationTime = creationTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + out.writeLong(creationTime); + out.writeLong(keepAlive); + } + + public String getPitId() { + return pitId; + } + + public long getKeepAlive() { + return keepAlive; + } + + public long getCreationTime() { + return creationTime; + } +} diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index 50f8f52253815..a73f8200ab277 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -58,6 +58,8 @@ import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollRequestBuilder; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -325,6 +327,11 @@ public interface Client extends OpenSearchClient, Releasable { */ SearchScrollRequestBuilder prepareSearchScroll(String scrollId); + /** + * Create point in time for one or more indices + */ + void createPit(CreatePitRequest createPITRequest, ActionListener listener); + /** * Performs multiple search requests. 
*/ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 4fdf4b1166bd6..6cc0827310bd1 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -324,6 +324,9 @@ import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollRequestBuilder; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; @@ -574,6 +577,11 @@ public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) { return new SearchScrollRequestBuilder(this, SearchScrollAction.INSTANCE, scrollId); } + @Override + public void createPit(final CreatePitRequest createPITRequest, final ActionListener listener) { + execute(CreatePitAction.INSTANCE, createPITRequest, listener); + } + @Override public ActionFuture multiSearch(MultiSearchRequest request) { return execute(MultiSearchAction.INSTANCE, request); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index f3d2ab0859513..11cb2ca316235 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -32,6 +32,7 @@ package org.opensearch.common.settings; import org.apache.logging.log4j.LogManager; +import org.opensearch.action.search.CreatePitController; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -467,6 +468,9 @@ public void apply(Settings value, Settings current, Settings previous) { MultiBucketConsumerService.MAX_BUCKET_SETTING, SearchService.LOW_LEVEL_CANCELLATION_SETTING, SearchService.MAX_OPEN_SCROLL_CONTEXT, + SearchService.MAX_OPEN_PIT_CONTEXT, + SearchService.MAX_PIT_KEEPALIVE_SETTING, + CreatePitController.PIT_INIT_KEEP_ALIVE, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, Node.NODE_ATTRIBUTES, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 75d7081e7729a..1a31bec5935c8 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -149,6 +149,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_CHECK_ON_STARTUP, IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, IndexSettings.MAX_SLICES_PER_SCROLL, + IndexSettings.MAX_SLICES_PER_PIT, IndexSettings.MAX_REGEX_LENGTH_SETTING, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index ed3f6002be073..eb8ba98e61890 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ 
b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -451,6 +451,17 @@ public final class IndexSettings { Property.IndexScope ); + /** + * The maximum number of slices allowed in a search request with PIT + */ + public static final Setting MAX_SLICES_PER_PIT = Setting.intSetting( + "index.max_slices_per_pit", + 1024, + 1, + Property.Dynamic, + Property.IndexScope + ); + /** * The maximum length of regex string allowed in a regexp query. */ @@ -604,7 +615,10 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { * The maximum number of slices allowed in a scroll request. */ private volatile int maxSlicesPerScroll; - + /** + * The maximum number of slices allowed in a PIT request. + */ + private volatile int maxSlicesPerPit; /** * The maximum length of regex string allowed in a regexp query. */ @@ -719,6 +733,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxShingleDiff = scopedSettings.get(MAX_SHINGLE_DIFF_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); + maxSlicesPerPit = scopedSettings.get(MAX_SLICES_PER_PIT); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); @@ -791,6 +806,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset); scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); + scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_PIT, this::setMaxSlicesPerPit); scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter); scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength); @@ -1262,6 +1278,17 @@ private void setMaxSlicesPerScroll(int value) { this.maxSlicesPerScroll = value; } + /** + * The maximum number of slices allowed in a PIT request. + */ + public int getMaxSlicesPerPit() { + return maxSlicesPerPit; + } + + private void setMaxSlicesPerPit(int value) { + this.maxSlicesPerPit = value; + } + /** * The maximum length of regex string allowed in a regexp query. */ diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java index d3177055a5bd8..0a7c80f5e87d3 100644 --- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java @@ -131,6 +131,19 @@ default void onFreeScrollContext(ReaderContext readerContext) {} */ default void validateReaderContext(ReaderContext readerContext, TransportRequest transportRequest) {} + /** + * Executed when a new Point-In-Time {@link ReaderContext} is created + * @param readerContext the created reader context + */ + default void onNewPitContext(ReaderContext readerContext) {} + + /** + * Executed when a Point-In-Time search {@link SearchContext} is freed. + * This happens when the Point-In-Time is deleted or its keep-alive expires.
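+ * A hypothetical implementation sketch (the metrics idea is illustrative, not part of this API): + * <pre> + * SearchOperationListener pitListener = new SearchOperationListener() { + * public void onFreePitContext(ReaderContext readerContext) { + * // e.g. decrement a gauge of live PIT contexts when one is released + * } + * }; + * </pre>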
+ * @param readerContext the freed search context + */ + default void onFreePitContext(ReaderContext readerContext) {} + /** * A Composite listener that multiplexes calls to each of the listeners methods. */ @@ -265,5 +278,36 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest } ExceptionsHelper.reThrowIfNotNull(exception); } + + /** + * Executed when a new Point-In-Time {@link ReaderContext} is created + * @param readerContext the created reader context + */ + @Override + public void onNewPitContext(ReaderContext readerContext) { + for (SearchOperationListener listener : listeners) { + try { + listener.onNewPitContext(readerContext); + } catch (Exception e) { + logger.warn("onNewPitContext listener failed", e); + } + } + } + + /** + * Executed when a Point-In-Time search {@link SearchContext} is freed. + * This happens when the Point-In-Time is deleted or its keep-alive expires. + * @param readerContext the freed search context + */ + @Override + public void onFreePitContext(ReaderContext readerContext) { + for (SearchOperationListener listener : listeners) { + try { + listener.onFreePitContext(readerContext); + } catch (Exception e) { + logger.warn("onFreePitContext listener failed", e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index d09143e3373b4..4bc40610facf2 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -49,6 +49,7 @@ import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.bitset.BitsetFilterCache; @@ -75,6 +76,7 @@ import org.opensearch.search.fetch.subphase.ScriptFieldsContext; import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.ScrollContext; import org.opensearch.search.internal.SearchContext; @@ -287,7 +289,7 @@ public void preProcess(boolean rewrite) { } } - if (sliceBuilder != null) { + if (sliceBuilder != null && scrollContext() != null) { int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll(); int numSlices = sliceBuilder.getMax(); if (numSlices > sliceLimit) { @@ -304,6 +306,22 @@ public void preProcess(boolean rewrite) { } } + if (sliceBuilder != null && readerContext != null && readerContext instanceof PitReaderContext) { + int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerPit(); + int numSlices = sliceBuilder.getMax(); + if (numSlices > sliceLimit) { + throw new OpenSearchRejectedExecutionException( + "The number of slices [" + + numSlices + + "] is too large. It must " + + "be less than [" + + sliceLimit + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_PIT.getKey() + + "] index level setting."
+ ); + } + } // initialize the filtering alias based on the provided filters try { final QueryBuilder queryBuilder = request.getAliasFilter().getQueryBuilder(); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 3b24d52bebe53..74527f26ead9e 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -44,6 +44,8 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.TransportActions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; @@ -111,6 +113,7 @@ import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalScrollSearchRequest; import org.opensearch.search.internal.LegacyReaderContext; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchContextId; @@ -166,6 +169,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope, Property.Dynamic ); + /** + * This setting defines the maximum keep alive that can be set on a PIT reader context when it is created or extended + */ + public static final Setting MAX_PIT_KEEPALIVE_SETTING = Setting.positiveTimeSetting( + "point_in_time.max_keep_alive", + timeValueHours(24), + Property.NodeScope, + Property.Dynamic + ); public static final Setting MAX_KEEPALIVE_SETTING = Setting.positiveTimeSetting( "search.max_keep_alive", timeValueHours(24), @@ -218,6 +230,19 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + /** + * This setting defines the maximum number of active PIT reader contexts on a node, since each PIT context + * has a resource cost attached to it. The limit defaults lower than the scroll limit because users are + * encouraged to share PIT contexts.
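+ * Being dynamic, it can be adjusted on a live cluster; a hypothetical sketch: + * <pre> + * client.admin() + * .cluster() + * .prepareUpdateSettings() + * .setTransientSettings(Settings.builder().put("search.max_open_pit_context", 500)) + * .get(); + * </pre>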
+ */ + public static final Setting MAX_OPEN_PIT_CONTEXT = Setting.intSetting( + "search.max_open_pit_context", + 300, + 0, + Property.Dynamic, + Property.NodeScope + ); + public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; @@ -243,6 +268,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile long maxKeepAlive; + private volatile long maxPitKeepAlive; + private volatile TimeValue defaultSearchTimeout; private volatile boolean defaultAllowPartialSearchResults; @@ -251,6 +278,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile int maxOpenScrollContext; + private volatile int maxOpenPitContext; + private final Cancellable keepAliveReaper; private final AtomicLong idGenerator = new AtomicLong(); @@ -260,6 +289,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final MultiBucketConsumerService multiBucketConsumerService; private final AtomicInteger openScrollContexts = new AtomicInteger(); + private final AtomicInteger openPitContexts = new AtomicInteger(); private final String sessionId = UUIDs.randomBase64UUID(); private final Executor indexSearcherExecutor; @@ -293,6 +323,14 @@ public SearchService( TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings); setKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_KEEPALIVE_SETTING.get(settings)); + setPitKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_PIT_KEEPALIVE_SETTING.get(settings)); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + DEFAULT_KEEPALIVE_SETTING, + MAX_PIT_KEEPALIVE_SETTING, + this::setPitKeepAlives, + this::validatePitKeepAlives + ); clusterService.getClusterSettings() .addSettingsUpdateConsumer(DEFAULT_KEEPALIVE_SETTING, MAX_KEEPALIVE_SETTING, this::setKeepAlives, this::validateKeepAlives); @@ -309,6 +347,9 @@ public SearchService( maxOpenScrollContext = MAX_OPEN_SCROLL_CONTEXT.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_SCROLL_CONTEXT, this::setMaxOpenScrollContext); + maxOpenPitContext = MAX_OPEN_PIT_CONTEXT.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_PIT_CONTEXT, this::setMaxOpenPitContext); + lowLevelCancellation = LOW_LEVEL_CANCELLATION_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation); } @@ -331,12 +372,38 @@ private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAli } } + /** + * Default keep alive search setting should be less than max PIT keep alive + */ + private void validatePitKeepAlives(TimeValue defaultKeepAlive, TimeValue maxPitKeepAlive) { + if (defaultKeepAlive.millis() > maxPitKeepAlive.millis()) { + throw new IllegalArgumentException( + "Default keep alive setting for request [" + + DEFAULT_KEEPALIVE_SETTING.getKey() + + "]" + + " should be smaller than max keep alive for PIT [" + + MAX_PIT_KEEPALIVE_SETTING.getKey() + + "], " + + "was (" + + defaultKeepAlive + + " > " + + maxPitKeepAlive + + ")" + ); + } + } + private void setKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { validateKeepAlives(defaultKeepAlive, maxKeepAlive); this.defaultKeepAlive = defaultKeepAlive.millis(); this.maxKeepAlive = maxKeepAlive.millis(); } + private void setPitKeepAlives(TimeValue defaultKeepAlive, TimeValue maxPitKeepAlive) { + validatePitKeepAlives(defaultKeepAlive, maxPitKeepAlive); + 
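// Only the PIT-specific maximum is stored here; the shared default keep alive is validated above and owned by setKeepAlives. +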
this.maxPitKeepAlive = maxPitKeepAlive.millis(); + } + private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { this.defaultSearchTimeout = defaultSearchTimeout; } @@ -353,6 +420,10 @@ private void setMaxOpenScrollContext(int maxOpenScrollContext) { this.maxOpenScrollContext = maxOpenScrollContext; } + private void setMaxOpenPitContext(int maxOpenPitContext) { + this.maxOpenPitContext = maxOpenPitContext; + } + private void setLowLevelCancellation(Boolean lowLevelCancellation) { this.lowLevelCancellation = lowLevelCancellation; } @@ -793,8 +864,8 @@ final ReaderContext createAndPutReaderContext( * Opens the reader context for the given shardId. The newly opened reader context will be kept * until the {@code keepAlive} has elapsed, unless it is manually released. */ - public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { - checkKeepAliveLimit(keepAlive.millis()); + public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { + checkPitKeepAliveLimit(keepAlive.millis()); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexShard shard = indexService.getShard(shardId.id()); final SearchOperationListener searchOperationListener = shard.getSearchOperationListener(); @@ -802,13 +873,31 @@ public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListen Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; try { + if (openPitContexts.incrementAndGet() > maxOpenPitContext) { + throw new OpenSearchRejectedExecutionException( + "Trying to create too many Point In Time contexts. Must be less than or equal to: [" + + maxOpenPitContext + + "]. " + + "This limit can be set by changing the [" + + MAX_OPEN_PIT_CONTEXT.getKey() + + "] setting."
+ ); + } searcherSupplier = shard.acquireSearcherSupplier(); final ShardSearchContextId id = new ShardSearchContextId(sessionId, idGenerator.incrementAndGet()); - readerContext = new ReaderContext(id, indexService, shard, searcherSupplier, keepAlive.millis(), false); + readerContext = new PitReaderContext(id, indexService, shard, searcherSupplier, keepAlive.millis(), false); final ReaderContext finalReaderContext = readerContext; searcherSupplier = null; // transfer ownership to reader context + searchOperationListener.onNewReaderContext(readerContext); - readerContext.addOnClose(() -> searchOperationListener.onFreeReaderContext(finalReaderContext)); + searchOperationListener.onNewPitContext(finalReaderContext); + + readerContext.addOnClose(() -> { + openPitContexts.decrementAndGet(); + searchOperationListener.onFreeReaderContext(finalReaderContext); + searchOperationListener.onFreePitContext(finalReaderContext); + }); + // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); @@ -819,6 +908,40 @@ public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListen }); } + /** + * Update PIT reader with pit id, keep alive and created time etc + */ + public void updatePitIdAndKeepAlive(UpdatePitContextRequest request, ActionListener listener) { + checkPitKeepAliveLimit(request.getKeepAlive()); + PitReaderContext readerContext = getPitReaderContext(request.getSearchContextId()); + if (readerContext == null) { + throw new SearchContextMissingException(request.getSearchContextId()); + } + Releasable updatePit = null; + try { + updatePit = readerContext.updatePitIdAndKeepAlive(request.getKeepAlive(), request.getPitId(), request.getCreationTime()); + listener.onResponse(new UpdatePitContextResponse(request.getPitId(), request.getCreationTime(), request.getKeepAlive())); + } catch (Exception e) { + freeReaderContext(readerContext.id()); + listener.onFailure(e); + } finally { + if (updatePit != null) { + updatePit.close(); + } + } + } + + /** + * Returns pit reader context based on ID + */ + public PitReaderContext getPitReaderContext(ShardSearchContextId id) { + ReaderContext context = activeReaders.get(id.getId()); + if (context instanceof PitReaderContext) { + return (PitReaderContext) context; + } + return null; + } + final SearchContext createContext( ReaderContext readerContext, ShardSearchRequest request, @@ -944,7 +1067,11 @@ private long getKeepAlive(ShardSearchRequest request) { if (request.scroll() != null) { return getScrollKeepAlive(request.scroll()); } else if (request.keepAlive() != null) { - checkKeepAliveLimit(request.keepAlive().millis()); + if (getReaderContext(request.readerId()) instanceof PitReaderContext) { + checkPitKeepAliveLimit(request.keepAlive().millis()); + } else { + checkKeepAliveLimit(request.keepAlive().millis()); + } return request.keepAlive().getMillis(); } else { return request.readerId() == null ? defaultKeepAlive : -1; @@ -975,6 +1102,25 @@ private void checkKeepAliveLimit(long keepAlive) { } } + /** + * check if request keep alive is greater than max keep alive + */ + private void checkPitKeepAliveLimit(long keepAlive) { + if (keepAlive > maxPitKeepAlive) { + throw new IllegalArgumentException( + "Keep alive for request (" + + TimeValue.timeValueMillis(keepAlive) + + ") is too large. " + + "It must be less than (" + + TimeValue.timeValueMillis(maxPitKeepAlive) + + "). 
" + + "This limit can be set by changing the [" + + MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting." + ); + } + } + private ActionListener wrapFailureListener(ActionListener listener, ReaderContext context, Releasable releasable) { return new ActionListener() { @Override @@ -1165,8 +1311,8 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } if (source.slice() != null) { - if (context.scrollContext() == null) { - throw new SearchException(shardTarget, "`slice` cannot be used outside of a scroll context"); + if (context.scrollContext() == null && !(context.readerContext() instanceof PitReaderContext)) { + throw new SearchException(shardTarget, "`slice` cannot be used outside of a scroll context or PIT context"); } context.sliceBuilder(source.slice()); } diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java new file mode 100644 index 0000000000000..1b2400bdadfa1 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.internal; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.shard.IndexShard; + +/** + * PIT reader context containing PIT specific information such as pit id, create time etc. + */ +public class PitReaderContext extends ReaderContext { + + // Storing the encoded PIT ID as part of PIT reader context for use cases such as list pit API + private final SetOnce pitId = new SetOnce<>(); + // Creation time of PIT contexts which helps users to differentiate between multiple PIT reader contexts + private final SetOnce creationTime = new SetOnce<>(); + + public PitReaderContext( + ShardSearchContextId id, + IndexService indexService, + IndexShard indexShard, + Engine.SearcherSupplier searcherSupplier, + long keepAliveInMillis, + boolean singleSession + ) { + super(id, indexService, indexShard, searcherSupplier, keepAliveInMillis, singleSession); + } + + public String getPitId() { + return this.pitId.get(); + } + + public void setPitId(final String pitId) { + this.pitId.set(pitId); + } + + /** + * Returns a releasable to indicate that the caller has stopped using this reader. + * The pit id can be updated and time to live of the reader usage can be extended using the provided + * keepAliveInMillis. 
+ */ + public Releasable updatePitIdAndKeepAlive(long keepAliveInMillis, String pitId, long createTime) { + getRefCounted().incRef(); + tryUpdateKeepAlive(keepAliveInMillis); + setPitId(pitId); + setCreationTime(createTime); + return Releasables.releaseOnce(() -> { + updateLastAccessTime(); + getRefCounted().decRef(); + }); + } + + public long getCreationTime() { + return this.creationTime.get(); + } + + public void setCreationTime(final long creationTime) { + this.creationTime.set(creationTime); + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java index 5bcc491f4ffdb..fe644e44f297f 100644 --- a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java @@ -105,7 +105,15 @@ public void validate(TransportRequest request) { indexShard.getSearchOperationListener().validateReaderContext(this, request); } - private long nowInMillis() { + protected AbstractRefCounted getRefCounted() { + return refCounted; + } + + protected void updateLastAccessTime() { + this.lastAccessTime.updateAndGet(curr -> Math.max(curr, nowInMillis())); + } + + protected long nowInMillis() { return indexShard.getThreadPool().relativeTimeInMillis(); } @@ -140,7 +148,10 @@ public Engine.Searcher acquireSearcher(String source) { return searcherSupplier.acquireSearcher(source); } - private void tryUpdateKeepAlive(long keepAlive) { + /** + * Update keep alive if it is greater than current keep alive + */ + public void tryUpdateKeepAlive(long keepAlive) { this.keepAlive.updateAndGet(curr -> Math.max(curr, keepAlive)); } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java new file mode 100644 index 0000000000000..09c5bd834f7d5 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -0,0 +1,460 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterConnectionTests; +import org.opensearch.transport.Transport; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.action.search.PitTestsUtil.getPitId; + +/** + * Functional tests for various methods in create pit controller. Covers update pit phase specifically since + * integration tests don't cover it. 
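+ * The transport layer is faked with {@code MockTransportService}, so the tests run without a real cluster.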
+ */ +public class CreatePitControllerTests extends OpenSearchTestCase { + + DiscoveryNode node1 = null; + DiscoveryNode node2 = null; + DiscoveryNode node3 = null; + String pitId = null; + TransportSearchAction transportSearchAction = null; + Task task = null; + DiscoveryNodes nodes = null; + NamedWriteableRegistry namedWriteableRegistry = null; + SearchResponse searchResponse = null; + ActionListener createPitListener = null; + ClusterService clusterServiceMock = null; + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes, Version version) { + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings + ) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool, settings); + } + + @Before + public void setupData() { + node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + pitId = getPitId(); + namedWriteableRegistry = new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + ) + ); + nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + transportSearchAction = mock(TransportSearchAction.class); + task = new Task( + randomLong(), + "transport", + SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + InternalSearchResponse response = new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + null, + false, + null, + 1 + ); + searchResponse = new SearchResponse( + response, + null, + 3, + 3, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY, + pitId + ); + createPitListener = new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + assertEquals(3, createPITResponse.getTotalShards()); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + + clusterServiceMock = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + + final Settings keepAliveSettings = Settings.builder().put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), 30000).build(); + when(clusterServiceMock.getSettings()).thenReturn(keepAliveSettings); + + when(state.getMetadata()).thenReturn(Metadata.EMPTY_METADATA); + when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA); + when(clusterServiceMock.state()).thenReturn(state); + when(state.getNodes()).thenReturn(nodes); + } + + /** + * Test if transport call for update pit is made to all nodes present as part of PIT ID returned from phase one of create pit + */ + public void testUpdatePitAfterCreatePitSuccess() throws 
InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + + CountDownLatch latch = new CountDownLatch(1); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry + ); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + assertEquals(3, createPITResponse.getTotalShards()); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + } + } + } + + /** + * If create phase results in failure, update pit phase should not proceed and propagate the exception + */ + public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new 
UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + + CountDownLatch latch = new CountDownLatch(1); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry + ); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("on response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getCause().getMessage().contains("Exception occurred in phase 1")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onFailure(new Exception("Exception occurred in phase 1")); + latch.await(); + assertEquals(0, updateNodesInvoked.size()); + } + } + } + + /** + * Testing that any update pit failures fails the request + */ + public void testUpdatePitFailureForNodeDrop() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + + updateNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId() == "node_3") { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry + ); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("node 3 down")); + } + }, 
latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + } + } + } + + public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry + ); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("node down")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + } + } + } + +} diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java new file mode 100644 index 0000000000000..ec83cb45697d9 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.Version; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.ShardSearchContextId; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.test.OpenSearchTestCase.between; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.opensearch.test.OpenSearchTestCase.randomBoolean; + +/** + * Helper class for common pit tests functions + */ +public class PitTestsUtil { + private PitTestsUtil() {} + + public static QueryBuilder randomQueryBuilder() { + if (randomBoolean()) { + return new TermQueryBuilder(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } else if (randomBoolean()) { + return new MatchAllQueryBuilder(); + } else { + return new IdsQueryBuilder().addIds(randomAlphaOfLength(10)); + } + } + + public static String getPitId() { + AtomicArray array = new AtomicArray<>(3); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("a", 1), + null + ); + testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("b", 12), + null + ); + testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("c", 42), + null + ); + testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); + array.setOnce(0, testSearchPhaseResult1); + array.setOnce(1, testSearchPhaseResult2); + array.setOnce(2, testSearchPhaseResult3); + + final Version version = Version.CURRENT; + final Map aliasFilters = new HashMap<>(); + for (SearchPhaseResult result : array.asList()) { + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if (randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } + } + return SearchContextId.encode(array.asList(), aliasFilters, version); + } +} diff --git a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java new file mode 100644 index 0000000000000..7eb7a62348c5f --- /dev/null +++ b/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java @@ -0,0 +1,202 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.containsString; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Multi node integration tests for PIT creation and search operation with PIT ID. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class CreatePitMultiNodeTests extends OpenSearchIntegTestCase { + + @Before + public void setupIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + } + + @After + public void clearIndex() { + client().admin().indices().prepareDelete("index").get(); + } + + public void testPit() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(2, searchResponse.getSuccessfulShards()); + assertEquals(2, searchResponse.getTotalShards()); + } + + public void testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), false); + request.setIndices(new String[] { "index" }); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); + assertTrue(ex.getMessage().contains("Partial shards failure")); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testCreatePitWhileNodeDropWithAllowPartialCreationTrue() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(2, 
pitResponse.getTotalShards()); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getTotalShards()); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitSearchWithNodeDrop() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + SearchResponse searchResponse = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getFailedShards()); + assertEquals(0, searchResponse.getSkippedShards()); + assertEquals(2, searchResponse.getTotalShards()); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitSearchWithNodeDropWithPartialSearchResultsFalse() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .setAllowPartialSearchResults(false) + .execute(); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + assertTrue(ex.getMessage().contains("Partial shards failure")); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitInvalidDefaultKeepAlive() { + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("point_in_time.max_keep_alive", "1m").put("search.default_keep_alive", "2m")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (2m > 1m)")); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "5m").put("point_in_time.max_keep_alive", "5m")) + .get() + ); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "2m")) + .get() + ); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("point_in_time.max_keep_alive", "2m")) + .get() + ); + exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (3m > 2m)")); + assertAcked( + client().admin() 
+ .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "1m")) + .get() + ); + exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("point_in_time.max_keep_alive", "30s")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (1m > 30s)")); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } +} diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java new file mode 100644 index 0000000000000..5c3c43af9cb66 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -0,0 +1,501 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.hamcrest.Matchers; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitController; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +/** + * Single node integration tests for various PIT use cases such as create pit, search etc + */ +public class CreatePitSingleNodeTests extends OpenSearchSingleNodeTestCase { + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Settings nodeSettings() { + // very frequent checks + return Settings.builder() + .put(super.nodeSettings()) + .put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(1)) + .put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + public void testCreatePITSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new 
CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertHitCount(searchResponse, 1); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + service.doClose(); // this kills the keep-alive reaper; we have to reset the node after this test + } + + public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + createIndex("index1", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + SearchService service = getInstanceFromNode(SearchService.class); + + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse response = execute.get(); + assertEquals(4, response.getSuccessfulShards()); + assertEquals(4, service.getActiveContexts()); + service.doClose(); + } + + public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertHitCount(searchResponse, 1); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + service.doClose(); + } + + public void testCreatePITWithNonExistentIndex() { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + SearchService service = getInstanceFromNode(SearchService.class); + + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + 
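// Editorial note, not part of the original patch: expectThrows(ExecutionException.class, execute::get) works
+ // because ActionFuture extends java.util.concurrent.Future, whose get() wraps the remote failure in an
+ // ExecutionException; the assertion below therefore matches on the wrapped cause's message rather than
+ // catching IndexNotFoundException directly.
+ 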
assertTrue(ex.getMessage().contains("no such index [index1]")); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testCreatePITOnCloseIndex() { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + client().admin().indices().prepareClose("index").get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue(ex.getMessage().contains("IndexClosedException")); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testPitSearchOnDeletedIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + client().admin().indices().prepareDelete("index").get(); + + IndexNotFoundException ex = expectThrows(IndexNotFoundException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.getMessage().contains("no such index [index]")); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testInvalidPitId() { + createIndex("idx"); + String id = "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"; + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(id).setKeepAlive(TimeValue.timeValueDays(1))) + .get() + ); + assertEquals("invalid id: [" + id + "]", e.getMessage()); + } + + public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + client().admin().indices().prepareClose("index").get(); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { + SearchResponse searchResponse = client().prepareSearch() + .setSize(2) + .setPointInTime(new 
PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + + // PIT reader contexts are lost after close, verifying it with open index api + client().admin().indices().prepareOpen("index").get(); + ex = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + for (int i = 0; i < SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." + ) + ); + service.doClose(); + } + + public void testOpenPitContextsConcurrently() throws Exception { + createIndex("index"); + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + Thread[] threads = new Thread[randomIntBetween(2, 8)]; + CountDownLatch latch = new CountDownLatch(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (;;) { + try { + client().execute(CreatePitAction.INSTANCE, request).get(); + } catch (ExecutionException e) { + assertTrue( + e.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [" + + SearchService.MAX_OPEN_PIT_CONTEXT.getKey() + + "] setting." 
+ ) + ); + return; + } + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertThat(service.getActiveContexts(), equalTo(maxPitContexts)); + service.doClose(); + } + + /** + * Point in time search should return the same results as creation time and index updates should not affect the PIT search results + */ + public void testPitAfterUpdateIndex() throws Exception { + client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 5)).get(); + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + + for (int i = 0; i < 50; i++) { + client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field("user", "foobar") + .field("postDate", System.currentTimeMillis()) + .field("message", "test") + .endObject() + ) + .get(); + } + client().admin().indices().prepareRefresh().get(); + + // create pit + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMinutes(2), true); + request.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(matchAllQuery()) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + + // update index + SearchResponse searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("user:foobar")) + .setSize(50) + .addSort("postDate", SortOrder.ASC) + .get(); + try { + do { + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + Map map = searchHit.getSourceAsMap(); + map.put("message", "update"); + client().prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); + } + searchResponse = client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get(); + + } while (searchResponse.getHits().getHits().length > 0); + + client().admin().indices().prepareRefresh().get(); + assertThat( + client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + /** + * assert without point in time + */ + + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", 
"test")).get().getHits().getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get().getHits().getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + /** + * using point in time id will have the same search results as ones before update + */ + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + } finally { + service.doClose(); + assertEquals(0, service.getActiveContexts()); + } + } + + public void testConcurrentSearches() throws Exception { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + Thread[] threads = new Thread[5]; + CountDownLatch latch = new CountDownLatch(threads.length); + + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (int j = 0; j < 50; j++) { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .execute() + .get(); + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + service.doClose(); + assertEquals(0, service.getActiveContexts()); + } +} diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 79184497b201c..96a4d9ad1d8d9 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -52,6 +52,7 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import 
org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.IndexCache; @@ -67,6 +68,7 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.LegacyReaderContext; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; @@ -134,10 +136,12 @@ public void testPreProcess() throws Exception { int maxResultWindow = randomIntBetween(50, 100); int maxRescoreWindow = randomIntBetween(50, 100); int maxSlicesPerScroll = randomIntBetween(50, 100); + int maxSlicesPerPit = randomIntBetween(50, 100); Settings settings = Settings.builder() .put("index.max_result_window", maxResultWindow) .put("index.max_slices_per_scroll", maxSlicesPerScroll) .put("index.max_rescore_window", maxRescoreWindow) + .put("index.max_slices_per_pit", maxSlicesPerPit) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) @@ -300,13 +304,13 @@ protected Engine.Searcher acquireSearcherInternal(String source) { ); readerContext.close(); - readerContext = new ReaderContext( + readerContext = new LegacyReaderContext( newContextId(), indexService, indexShard, searcherSupplier.get(), - randomNonNegativeLong(), - false + shardSearchRequest, + randomNonNegativeLong() ); // rescore is null but sliceBuilder is not null DefaultSearchContext context2 = new DefaultSearchContext( @@ -404,6 +408,52 @@ protected Engine.Searcher acquireSearcherInternal(String source) { assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); readerContext.close(); + + ReaderContext pitReaderContext = new PitReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + 1000, + true + ); + DefaultSearchContext context5 = new DefaultSearchContext( + pitReaderContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + timeout, + null, + false, + Version.CURRENT, + false, + executor + ); + int numSlicesForPit = maxSlicesPerPit + randomIntBetween(1, 100); + when(sliceBuilder.getMax()).thenReturn(numSlicesForPit); + context5.sliceBuilder(sliceBuilder); + + OpenSearchRejectedExecutionException exception1 = expectThrows( + OpenSearchRejectedExecutionException.class, + () -> context5.preProcess(false) + ); + assertThat( + exception1.getMessage(), + equalTo( + "The number of slices [" + + numSlicesForPit + + "] is too large. It must " + + "be less than [" + + maxSlicesPerPit + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_PIT.getKey() + + "] index level setting." 
+ ) + ); + pitReaderContext.close(); + threadPool.shutdown(); } } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 4e342875e4599..08a2ee155ccd4 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -46,6 +46,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; @@ -1406,7 +1408,7 @@ public void testOpenReaderContext() { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); - searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); future.actionGet(); assertThat(searchService.getActiveContexts(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); @@ -1422,4 +1424,100 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard false ); } + + public void testPitContextMaxKeepAlive() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueHours(25), future); + future.actionGet(); + }); + assertEquals( + "Keep alive for request (1d) is too large. " + + "It must be less than (" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.get(Settings.EMPTY) + + "). 
" + + "This limit can be set by changing the [" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting.", + ex.getMessage() + ); + assertThat(searchService.getActiveContexts(), equalTo(0)); + } + + public void testUpdatePitId() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + ShardSearchContextId id = future.actionGet(); + PlainActionFuture updateFuture = new PlainActionFuture<>(); + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueMinutes(between(1, 10)).millis(), + System.currentTimeMillis() + ); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + UpdatePitContextResponse updateResponse = updateFuture.actionGet(); + assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); + assertTrue(updateResponse.getCreationTime() == updateRequest.getCreationTime()); + assertTrue(updateResponse.getKeepAlive() == updateRequest.getKeepAlive()); + assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + public void testUpdatePitIdMaxKeepAlive() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + ShardSearchContextId id = future.actionGet(); + + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueHours(25).millis(), + System.currentTimeMillis() + ); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { + PlainActionFuture updateFuture = new PlainActionFuture<>(); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + }); + + assertEquals( + "Keep alive for request (1d) is too large. " + + "It must be less than (" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.get(Settings.EMPTY) + + "). 
" + + "This limit can be set by changing the [" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting.", + ex.getMessage() + ); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + public void testUpdatePitIdWithInvalidReaderId() { + SearchService searchService = getInstanceFromNode(SearchService.class); + ShardSearchContextId id = new ShardSearchContextId("session", 9); + + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueHours(23).millis(), + System.currentTimeMillis() + ); + SearchContextMissingException ex = expectThrows(SearchContextMissingException.class, () -> { + PlainActionFuture updateFuture = new PlainActionFuture<>(); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + }); + + assertEquals("No search context found for id [" + id.getId() + "]", ex.getMessage()); + assertThat(searchService.getActiveContexts(), equalTo(0)); + } } From 4466a1fb3e34807b9427b61c0f8a09b547d6984e Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Wed, 20 Jul 2022 20:28:08 +0000 Subject: [PATCH 023/104] [Segment Replication] Fixing flaky test failure happening for testShardAlreadyReplicating() (#3943) * Fixing flaky test failure happening for testShardAlreadyReplicating() Signed-off-by: Rishikesh1159 --- .../SegmentReplicationTargetServiceTests.java | 56 +++++++++++++------ 1 file changed, 38 insertions(+), 18 deletions(-) diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index ef67bd665dedf..7ff6e3dceabc9 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -23,6 +23,8 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -32,6 +34,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.eq; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { @@ -40,6 +43,9 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private SegmentReplicationSource replicationSource; private SegmentReplicationTargetService sut; + private ReplicationCheckpoint initialCheckpoint; + private ReplicationCheckpoint aheadCheckpoint; + @Override public void setUp() throws Exception { super.setUp(); @@ -54,6 +60,14 @@ public void setUp() throws Exception { when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); + initialCheckpoint = indexShard.getLatestReplicationCheckpoint(); + aheadCheckpoint = new ReplicationCheckpoint( + initialCheckpoint.getShardId(), + initialCheckpoint.getPrimaryTerm(), + initialCheckpoint.getSegmentsGen(), + initialCheckpoint.getSeqNo(), + initialCheckpoint.getSegmentInfosVersion() + 1 + ); } @Override @@ -127,22 +141,36 @@ public void 
testAlreadyOnNewCheckpoint() { verify(spy, times(0)).startReplication(any(), any(), any()); } - public void testShardAlreadyReplicating() { - SegmentReplicationTargetService spy = spy(sut); - // Create a separate target and start it so the shard is already replicating. + public void testShardAlreadyReplicating() throws InterruptedException { + // Create a spy of the target service so that we can verify that startReplication is invoked with a specific checkpoint. + SegmentReplicationTargetService serviceSpy = spy(sut); final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, indexShard, replicationSource, mock(SegmentReplicationTargetService.SegmentReplicationListener.class) ); - final SegmentReplicationTarget spyTarget = Mockito.spy(target); - spy.startReplication(spyTarget); + // Create a Mockito spy of the target to stub the responses of a few method calls. + final SegmentReplicationTarget targetSpy = Mockito.spy(target); + CountDownLatch latch = new CountDownLatch(1); + // Stub startReplication on targetSpy: send a new checkpoint to serviceSpy, complete the listener, and then + // count down the latch. + doAnswer(invocation -> { + final ActionListener listener = invocation.getArgument(0); + // a new checkpoint arrives before we've completed. + serviceSpy.onNewCheckpoint(aheadCheckpoint, indexShard); + listener.onResponse(null); + latch.countDown(); + return null; + }).when(targetSpy).startReplication(any()); + doNothing().when(targetSpy).onDone(); - // a new checkpoint comes in for the same IndexShard. - spy.onNewCheckpoint(checkpoint, indexShard); - verify(spy, times(0)).startReplication(any(), any(), any()); - spyTarget.markAsDone(); + // start replication of this shard for the first time. + serviceSpy.startReplication(targetSpy); + + // wait for the new checkpoint to arrive before the listener completes. 
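+ // Editorial note, not part of the original patch: CountDownLatch.await(long, TimeUnit) returns false when the
+ // timeout elapses before countDown() runs; asserting on that return value, for example
+ // assertTrue("checkpoint never arrived", latch.await(30, TimeUnit.SECONDS)), would fail fast instead of
+ // letting the verify(...) below run against a replication that never delivered the checkpoint.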
+ latch.await(30, TimeUnit.SECONDS); + verify(serviceSpy, times(0)).startReplication(eq(aheadCheckpoint), eq(indexShard), any()); } public void testNewCheckpointBehindCurrentCheckpoint() { @@ -163,19 +191,11 @@ public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOExc allowShardFailures(); SegmentReplicationTargetService spy = spy(sut); IndexShard spyShard = spy(indexShard); - ReplicationCheckpoint cp = indexShard.getLatestReplicationCheckpoint(); - ReplicationCheckpoint newCheckpoint = new ReplicationCheckpoint( - cp.getShardId(), - cp.getPrimaryTerm(), - cp.getSegmentsGen(), - cp.getSeqNo(), - cp.getSegmentInfosVersion() + 1 - ); ArgumentCaptor captor = ArgumentCaptor.forClass( SegmentReplicationTargetService.SegmentReplicationListener.class ); doNothing().when(spy).startReplication(any(), any(), any()); - spy.onNewCheckpoint(newCheckpoint, spyShard); + spy.onNewCheckpoint(aheadCheckpoint, spyShard); verify(spy, times(1)).startReplication(any(), any(), captor.capture()); SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); listener.onFailure(new SegmentReplicationState(new ReplicationLuceneIndex()), new OpenSearchException("testing"), true); From 1510b942bc05a89928e576300119f62871e5a1fd Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 20 Jul 2022 15:23:51 -0700 Subject: [PATCH 024/104] Rename and deprecate public methods that contains 'master' in the name in 'server' directory (#3647) Signed-off-by: Tianli Feng --- .../opensearch/index/reindex/RetryTests.java | 2 +- .../azure/classic/AzureSimpleTests.java | 4 +- .../classic/AzureTwoStartedNodesTests.java | 4 +- .../discovery/gce/GceDiscoverTests.java | 4 +- ...ansportClusterStateActionDisruptionIT.java | 7 +- .../cluster/MinimumClusterManagerNodesIT.java | 6 +- .../cluster/SimpleClusterStateIT.java | 2 +- .../SpecificClusterManagerNodesIT.java | 32 ++-- .../coordination/RareClusterStateIT.java | 8 +- .../coordination/VotingConfigurationIT.java | 9 +- .../discovery/ClusterManagerDisruptionIT.java | 8 +- .../discovery/DiscoveryDisruptionIT.java | 2 +- .../StableClusterManagerDisruptionIT.java | 8 +- .../single/SingleNodeDiscoveryIT.java | 2 +- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../AddVotingConfigExclusionsRequest.java | 6 +- .../cluster/health/ClusterHealthResponse.java | 12 +- .../health/TransportClusterHealthAction.java | 2 +- .../TransportCleanupRepositoryAction.java | 4 +- .../TransportClusterUpdateSettingsAction.java | 4 +- .../cluster/state/ClusterStateResponse.java | 2 +- .../state/TransportClusterStateAction.java | 2 +- .../stats/TransportClusterStatsAction.java | 2 +- .../TransportDeleteIndexTemplateAction.java | 2 +- .../put/TransportPutIndexTemplateAction.java | 2 +- .../action/bulk/TransportShardBulkAction.java | 4 +- .../TransportClusterManagerNodeAction.java | 8 +- .../cluster/ClusterChangedEvent.java | 12 +- .../ClusterManagerNodeChangePredicate.java | 4 +- .../org/opensearch/cluster/ClusterState.java | 8 +- .../cluster/ClusterStateObserver.java | 4 +- .../cluster/ClusterStateTaskExecutor.java | 12 +- .../cluster/ClusterStateTaskListener.java | 13 +- .../cluster/ClusterStateUpdateTask.java | 2 +- .../cluster/InternalClusterInfoService.java | 4 +- .../cluster/LocalClusterUpdateTask.java | 2 +- .../LocalNodeClusterManagerListener.java | 4 +- .../action/index/MappingUpdatedAction.java | 15 +- .../action/shard/ShardStateAction.java | 6 +- .../coordination/ClusterBootstrapService.java | 10 +- .../ClusterFormationFailureHelper.java | 9 +- 
.../coordination/CoordinationState.java | 4 +- .../cluster/coordination/Coordinator.java | 57 +++---- .../coordination/FollowersChecker.java | 2 +- .../cluster/coordination/JoinHelper.java | 30 ++-- .../coordination/JoinTaskExecutor.java | 30 ++-- .../cluster/coordination/LeaderChecker.java | 4 +- .../NodeRemovalClusterStateTaskExecutor.java | 2 +- .../cluster/coordination/PeersResponse.java | 11 +- .../cluster/coordination/Publication.java | 5 +- .../PublicationTransportHandler.java | 2 +- .../cluster/coordination/Reconfigurator.java | 4 +- .../UnsafeBootstrapClusterManagerCommand.java | 2 +- .../cluster/health/ClusterStateHealth.java | 10 +- .../MetadataIndexTemplateService.java | 32 +++- .../SystemIndexMetadataUpgradeService.java | 4 +- .../metadata/TemplateUpgradeService.java | 4 +- .../cluster/node/DiscoveryNode.java | 22 ++- .../cluster/node/DiscoveryNodes.java | 148 +++++++++++++++--- .../routing/BatchedRerouteService.java | 2 +- .../routing/DelayedAllocationService.java | 15 +- .../service/ClusterApplierService.java | 12 +- .../cluster/service/ClusterService.java | 22 ++- .../cluster/service/MasterService.java | 16 +- .../settings/ConsistentSettingsService.java | 4 +- .../HandshakingTransportAddressConnector.java | 2 +- .../org/opensearch/discovery/PeerFinder.java | 10 +- .../org/opensearch/env/NodeEnvironment.java | 4 +- .../opensearch/env/NodeRepurposeCommand.java | 2 +- .../java/org/opensearch/gateway/Gateway.java | 2 +- .../opensearch/gateway/GatewayMetaState.java | 4 +- .../opensearch/gateway/GatewayService.java | 29 ++-- .../gateway/LocalAllocateDangledIndices.java | 4 +- .../index/seqno/ReplicationTracker.java | 22 ++- .../opensearch/index/shard/IndexShard.java | 2 +- .../cluster/IndicesClusterStateService.java | 18 +-- .../main/java/org/opensearch/node/Node.java | 8 +- .../PersistentTasksClusterService.java | 8 +- .../repositories/RepositoriesService.java | 6 +- .../VerifyNodeRepositoryAction.java | 2 +- .../action/cat/RestClusterManagerAction.java | 2 +- .../rest/action/cat/RestHealthAction.java | 2 +- .../rest/action/cat/RestNodesAction.java | 2 +- .../InternalSnapshotsInfoService.java | 6 +- .../opensearch/snapshots/RestoreService.java | 6 +- .../snapshots/SnapshotShardsService.java | 4 +- .../snapshots/SnapshotsService.java | 14 +- .../transport/ConnectionProfile.java | 5 +- .../transport/SniffConnectionStrategy.java | 2 +- ...tAddVotingConfigExclusionsActionTests.java | 2 +- ...learVotingConfigExclusionsActionTests.java | 6 +- .../health/ClusterHealthResponsesTests.java | 16 +- .../reroute/ClusterRerouteResponseTests.java | 2 +- .../state/ClusterStateResponseTests.java | 2 +- .../TransportBroadcastByNodeActionTests.java | 8 +- ...ransportClusterManagerNodeActionTests.java | 16 +- .../nodes/TransportNodesActionTests.java | 2 +- ...ReplicationAllPermitsAcquisitionTests.java | 2 +- .../cluster/ClusterChangedEventTests.java | 14 +- .../opensearch/cluster/ClusterStateTests.java | 8 +- ...rnalClusterInfoServiceSchedulingTests.java | 4 +- .../index/MappingUpdatedActionTests.java | 4 +- .../action/shard/ShardStateActionTests.java | 8 +- .../coordination/CoordinatorTests.java | 4 +- .../coordination/FollowersCheckerTests.java | 2 +- .../coordination/JoinTaskExecutorTests.java | 8 +- .../coordination/LeaderCheckerTests.java | 4 +- .../cluster/coordination/NodeJoinTests.java | 6 +- .../coordination/PublicationTests.java | 2 +- .../health/ClusterStateHealthTests.java | 9 +- .../metadata/AutoExpandReplicasTests.java | 4 +- .../metadata/TemplateUpgradeServiceTests.java | 2 +- 
.../node/DiscoveryNodeRoleSettingTests.java | 4 +- .../cluster/node/DiscoveryNodesTests.java | 38 ++--- .../routing/BatchedRerouteServiceTests.java | 5 +- .../DelayedAllocationServiceTests.java | 10 +- ...storeInProgressAllocationDeciderTests.java | 4 +- .../ClusterSerializationTests.java | 2 +- .../ClusterStateToStringTests.java | 2 +- .../service/ClusterApplierServiceTests.java | 10 +- .../cluster/service/MasterServiceTests.java | 2 +- .../discovery/AbstractDisruptionTestCase.java | 15 +- .../discovery/PeerFinderMessagesTests.java | 6 +- .../opensearch/discovery/PeerFinderTests.java | 8 +- .../gateway/GatewayServiceTests.java | 2 +- .../IncrementalClusterStateWriterTests.java | 2 +- .../index/engine/InternalEngineTests.java | 6 +- .../index/engine/NoOpEngineTests.java | 4 +- ...PeerRecoveryRetentionLeaseExpiryTests.java | 6 +- ...ReplicationTrackerRetentionLeaseTests.java | 32 ++-- .../index/seqno/ReplicationTrackerTests.java | 44 +++--- .../indices/cluster/ClusterStateChanges.java | 10 +- ...ClusterStateServiceRandomUpdatesTests.java | 2 +- .../PersistentTasksClusterServiceTests.java | 10 +- .../PersistentTasksCustomMetadataTests.java | 4 +- .../InternalSnapshotsInfoServiceTests.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 14 +- .../ClusterStateCreationUtils.java | 14 +- .../AbstractCoordinatorTestCase.java | 23 +-- .../CoordinationStateTestCluster.java | 2 +- .../blobstore/BlobStoreTestUtil.java | 4 +- .../opensearch/test/ClusterServiceUtils.java | 4 +- .../opensearch/test/ExternalTestCluster.java | 2 +- .../opensearch/test/InternalTestCluster.java | 41 ++--- .../test/OpenSearchIntegTestCase.java | 4 +- .../test/test/InternalTestClusterTests.java | 2 +- 146 files changed, 827 insertions(+), 513 deletions(-) diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java index 1c1a55bf59537..55234bcaaf0e2 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java @@ -123,7 +123,7 @@ public void testReindexFromRemote() throws Exception { */ NodeInfo clusterManagerNode = null; for (NodeInfo candidate : client.admin().cluster().prepareNodesInfo().get().getNodes()) { - if (candidate.getNode().isMasterNode()) { + if (candidate.getNode().isClusterManagerNode()) { clusterManagerNode = candidate; } } diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureSimpleTests.java index f78fb4617d198..4c2dda180eda6 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureSimpleTests.java @@ -51,7 +51,7 @@ public void testOneNodeShouldRunUsingPrivateIp() { final String node1 = internalCluster().startNode(settings); registerAzureNode(node1); assertNotNull( - client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getMasterNodeId() + client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getClusterManagerNodeId() ); // We expect having 1 node as part of the cluster, let's test that @@ -66,7 +66,7 @@ public void 
testOneNodeShouldRunUsingPublicIp() { final String node1 = internalCluster().startNode(settings); registerAzureNode(node1); assertNotNull( - client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getMasterNodeId() + client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getClusterManagerNodeId() ); // We expect having 1 node as part of the cluster, let's test that diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index f0af35092c8ca..10e8a65e906cc 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -54,14 +54,14 @@ public void testTwoNodesShouldRunUsingPrivateOrPublicIp() { final String node1 = internalCluster().startNode(settings); registerAzureNode(node1); assertNotNull( - client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getMasterNodeId() + client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getClusterManagerNodeId() ); logger.info("--> start another node"); final String node2 = internalCluster().startNode(settings); registerAzureNode(node2); assertNotNull( - client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getMasterNodeId() + client().admin().cluster().prepareState().setClusterManagerNodeTimeout("1s").get().getState().nodes().getClusterManagerNodeId() ); // We expect having 2 nodes as part of the cluster, let's test that diff --git a/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java index de4f267547eb3..4f83c39dc4ef0 100644 --- a/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java @@ -96,7 +96,7 @@ public void testJoin() { .clear() .setNodes(true) .get(); - assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); + assertNotNull(clusterStateResponse.getState().nodes().getClusterManagerNodeId()); // start another node final String secondNode = internalCluster().startNode(); @@ -109,7 +109,7 @@ public void testJoin() { .setNodes(true) .setLocal(true) .get(); - assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); + assertNotNull(clusterStateResponse.getState().nodes().getClusterManagerNodeId()); // wait for the cluster to form assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index 2dfe784e47350..3be75d672d823 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -82,7 +82,10 @@ public void testNonLocalRequestAlwaysFindsClusterManager() throws Exception { } catch (ClusterManagerNotDiscoveredException e) { return; // ok, we hit the disconnected node } - assertNotNull("should always contain a cluster-manager node", clusterStateResponse.getState().nodes().getMasterNodeId()); + assertNotNull( + "should always contain a cluster-manager node", + clusterStateResponse.getState().nodes().getClusterManagerNodeId() + ); }); } @@ -134,7 +137,7 @@ public void testNonLocalRequestAlwaysFindsClusterManagerAndWaitsForMetadata() th } if (clusterStateResponse.isWaitForTimedOut() == false) { final ClusterState state = clusterStateResponse.getState(); - assertNotNull("should always contain a cluster-manager node", state.nodes().getMasterNodeId()); + assertNotNull("should always contain a cluster-manager node", state.nodes().getClusterManagerNodeId()); assertThat("waited for metadata version", state.metadata().version(), greaterThanOrEqualTo(waitForMetadataVersion)); } }); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index 8da6040b6f996..c431dfaa14fa0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -168,7 +168,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); // verify that both nodes are still in the cluster state but there is no cluster-manager assertThat(state.nodes().getSize(), equalTo(2)); - assertThat(state.nodes().getMasterNode(), equalTo(null)); + assertThat(state.nodes().getClusterManagerNode(), equalTo(null)); logger.info("--> starting the previous cluster-manager node again..."); node2Name = internalCluster().startNode(Settings.builder().put(settings).put(clusterManagerDataPathSettings).build()); @@ -387,7 +387,7 @@ public void onFailure(String source, Exception e) { assertThat(failure.get(), instanceOf(FailedToCommitClusterStateException.class)); logger.debug("--> check that there is no cluster-manager in minor partition"); - assertBusy(() -> assertThat(clusterManagerClusterService.state().nodes().getMasterNode(), nullValue())); + assertBusy(() -> assertThat(clusterManagerClusterService.state().nodes().getClusterManagerNode(), nullValue())); // let major partition to elect new cluster-manager, to ensure that old cluster-manager is not elected once partition is restored, // otherwise persistent setting (which is a part of accepted state on old cluster-manager) will be propagated to other nodes @@ -401,7 +401,7 @@ public void onFailure(String source, Exception e) { .actionGet() .getState() .nodes() - .getMasterNode(); + .getClusterManagerNode(); assertThat(clusterManagerNode, notNullValue()); assertThat(clusterManagerNode.getName(), not(equalTo(clusterManager))); }); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java index 
e3adeb1ad8d82..df0052f82e43a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java @@ -473,7 +473,7 @@ public Collection createComponents( return; } - if (state.nodes().isLocalNodeElectedMaster()) { + if (state.nodes().isLocalNodeElectedClusterManager()) { if (state.custom("test") == null) { if (installed.compareAndSet(false, true)) { clusterService.submitStateUpdateTask("install-metadata-custom", new ClusterStateUpdateTask(Priority.URGENT) { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java index ebdff162ca4e2..073f006d105f2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java @@ -69,7 +69,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { .actionGet() .getState() .nodes() - .getMasterNodeId(), + .getClusterManagerNodeId(), nullValue() ); fail("should not be able to find cluster-manager"); @@ -87,7 +87,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(clusterManagerNodeName) ); @@ -100,7 +100,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(clusterManagerNodeName) ); @@ -119,7 +119,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { .actionGet() .getState() .nodes() - .getMasterNodeId(), + .getClusterManagerNodeId(), nullValue() ); fail("should not be able to find cluster-manager"); @@ -140,7 +140,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(nextClusterManagerEligibleNodeName) ); @@ -153,7 +153,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(nextClusterManagerEligibleNodeName) ); @@ -173,7 +173,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNodeId(), + .getClusterManagerNodeId(), nullValue() ); fail("should not be able to find cluster-manager"); @@ -191,7 +191,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(clusterManagerNodeName) ); @@ -204,7 +204,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(clusterManagerNodeName) ); @@ -220,7 +220,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(clusterManagerNodeName) ); @@ -233,7 +233,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), 
equalTo(clusterManagerNodeName) ); @@ -246,7 +246,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(clusterManagerNodeName) ); @@ -264,7 +264,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(nextClusterManagerEligableNodeName) ); @@ -277,7 +277,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(nextClusterManagerEligableNodeName) ); @@ -292,7 +292,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(nextClusterManagerEligableNodeName) ); @@ -305,7 +305,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { .actionGet() .getState() .nodes() - .getMasterNode() + .getClusterManagerNode() .getName(), equalTo(nextClusterManagerEligableNodeName) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index 61b186c951ce8..90b9ff7cab3b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ -244,13 +244,13 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { // Check routing tables ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertEquals(clusterManager, state.nodes().getMasterNode().getName()); + assertEquals(clusterManager, state.nodes().getClusterManagerNode().getName()); List shards = state.routingTable().allShards("index"); assertThat(shards, hasSize(1)); for (ShardRouting shard : shards) { if (shard.primary()) { // primary must not be on the cluster-manager node - assertFalse(state.nodes().getMasterNodeId().equals(shard.currentNodeId())); + assertFalse(state.nodes().getClusterManagerNodeId().equals(shard.currentNodeId())); } else { fail(); // only primaries } @@ -336,13 +336,13 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { // Check routing tables ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertEquals(clusterManager, state.nodes().getMasterNode().getName()); + assertEquals(clusterManager, state.nodes().getClusterManagerNode().getName()); List shards = state.routingTable().allShards("index"); assertThat(shards, hasSize(2)); for (ShardRouting shard : shards) { if (shard.primary()) { // primary must be on the cluster-manager - assertEquals(state.nodes().getMasterNodeId(), shard.currentNodeId()); + assertEquals(state.nodes().getClusterManagerNodeId(), shard.currentNodeId()); } else { assertTrue(shard.active()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java index 544565f4c1cd4..64474546f6106 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java @@ -105,7 +105,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { final Set votingConfiguration = clusterState.getLastCommittedConfiguration().getNodeIds(); assertThat(votingConfiguration, hasSize(3)); assertThat(clusterState.nodes().getSize(), equalTo(4)); - assertThat(votingConfiguration, hasItem(clusterState.nodes().getMasterNodeId())); + assertThat(votingConfiguration, hasItem(clusterState.nodes().getClusterManagerNodeId())); for (DiscoveryNode discoveryNode : clusterState.nodes()) { if (votingConfiguration.contains(discoveryNode.getId()) == false) { assertThat(excludedNodeName, nullValue()); @@ -155,7 +155,10 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { .setMetadata(true) .get() .getState(); - assertThat(newClusterState.nodes().getMasterNode().getName(), equalTo(excludedNodeName)); - assertThat(newClusterState.getLastCommittedConfiguration().getNodeIds(), hasItem(newClusterState.nodes().getMasterNodeId())); + assertThat(newClusterState.nodes().getClusterManagerNode().getName(), equalTo(excludedNodeName)); + assertThat( + newClusterState.getLastCommittedConfiguration().getNodeIds(), + hasItem(newClusterState.nodes().getClusterManagerNodeId()) + ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 2f19d1e476c94..cd8846067ad1f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -171,7 +171,11 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc try { assertEquals("unequal versions", state.version(), nodeState.version()); assertEquals("unequal node count", state.nodes().getSize(), nodeState.nodes().getSize()); - assertEquals("different cluster-managers ", state.nodes().getMasterNodeId(), nodeState.nodes().getMasterNodeId()); + assertEquals( + "different cluster-managers ", + state.nodes().getClusterManagerNodeId(), + nodeState.nodes().getClusterManagerNodeId() + ); assertEquals("different meta data version", state.metadata().version(), nodeState.metadata().version()); assertEquals("different routing", state.routingTable().toString(), nodeState.routingTable().toString()); } catch (AssertionError t) { @@ -238,7 +242,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { for (String node : partitions.getMajoritySide()) { ClusterState nodeState = getNodeClusterState(node); boolean success = true; - if (nodeState.nodes().getMasterNode() == null) { + if (nodeState.nodes().getClusterManagerNode() == null) { success = false; } if (!nodeState.blocks().global().isEmpty()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index c6e4d95449d42..1825fbdcf32d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -71,7 +71,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { TransportService clusterManagerTranspotService = internalCluster().getInstance( TransportService.class, - 
discoveryNodes.getMasterNode().getName() + discoveryNodes.getClusterManagerNode().getName() ); logger.info("blocking requests from non cluster-manager [{}] to cluster-manager [{}]", nonClusterManagerNode, clusterManagerNode); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java index fcc2b7a7d5ad4..8992e4749c1aa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java @@ -129,7 +129,7 @@ public void testFailWithMinimumClusterManagerNodesConfigured() throws Exception private void ensureNoMaster(String node) throws Exception { assertBusy( () -> assertNull( - client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState().nodes().getMasterNode() + client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState().nodes().getClusterManagerNode() ) ); } @@ -220,8 +220,8 @@ public void testStaleClusterManagerNotHijackingMajority() throws Exception { for (final String node : majoritySide) { clusterManagers.put(node, new ArrayList<>()); internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousClusterManager = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentClusterManager = event.state().nodes().getMasterNode(); + DiscoveryNode previousClusterManager = event.previousState().nodes().getClusterManagerNode(); + DiscoveryNode currentClusterManager = event.state().nodes().getClusterManagerNode(); if (!Objects.equals(previousClusterManager, currentClusterManager)) { logger.info( "--> node {} received new cluster state: {} \n and had previous cluster state: {}", @@ -238,7 +238,7 @@ public void testStaleClusterManagerNotHijackingMajority() throws Exception { final CountDownLatch oldClusterManagerNodeSteppedDown = new CountDownLatch(1); internalCluster().getInstance(ClusterService.class, oldClusterManagerNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { + if (event.state().nodes().getClusterManagerNodeId() == null) { oldClusterManagerNodeSteppedDown.countDown(); } }); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java index 13aea01772424..47df0aeced3cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java @@ -112,7 +112,7 @@ public Path nodeConfigPath(int nodeOrdinal) { final ClusterState second = other.getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); assertThat(second.nodes().getSize(), equalTo(1)); - assertThat(first.nodes().getMasterNodeId(), not(equalTo(second.nodes().getMasterNodeId()))); + assertThat(first.nodes().getClusterManagerNodeId(), not(equalTo(second.nodes().getClusterManagerNodeId()))); assertThat(first.metadata().clusterUUID(), not(equalTo(second.metadata().clusterUUID()))); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 
0ab3be3d63091..82131b0260f49 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -1511,7 +1511,7 @@ public void testOngoingRecoveryAndClusterManagerFailOver() throws Exception { ); phase1ReadyBlocked.await(); internalCluster().restartNode( - clusterService().state().nodes().getMasterNode().getName(), + clusterService().state().nodes().getClusterManagerNode().getName(), new InternalTestCluster.RestartCallback() ); internalCluster().ensureAtLeastNumDataNodes(3); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index a8035a10e8d91..a2a77a1316898 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -132,7 +132,7 @@ Set resolveVotingConfigExclusions(ClusterState currentSta if (nodeDescriptions.length >= 1) { newVotingConfigExclusions = Arrays.stream(allNodes.resolveNodes(nodeDescriptions)) .map(allNodes::get) - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .map(VotingConfigExclusion::new) .collect(Collectors.toSet()); @@ -147,7 +147,7 @@ Set resolveVotingConfigExclusions(ClusterState currentSta for (String nodeId : nodeIds) { if (allNodes.nodeExists(nodeId)) { DiscoveryNode discoveryNode = allNodes.get(nodeId); - if (discoveryNode.isMasterNode()) { + if (discoveryNode.isClusterManagerNode()) { newVotingConfigExclusions.add(new VotingConfigExclusion(discoveryNode)); } } else { @@ -162,7 +162,7 @@ Set resolveVotingConfigExclusions(ClusterState currentSta for (String nodeName : nodeNames) { if (existingNodes.containsKey(nodeName)) { DiscoveryNode discoveryNode = existingNodes.get(nodeName); - if (discoveryNode.isMasterNode()) { + if (discoveryNode.isClusterManagerNode()) { newVotingConfigExclusions.add(new VotingConfigExclusion(discoveryNode)); } } else { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index a67ef721879ce..8cabdff02390c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -284,8 +284,14 @@ public int getNumberOfDataNodes() { return clusterStateHealth.getNumberOfDataNodes(); } + public boolean hasDiscoveredClusterManager() { + return clusterStateHealth.hasDiscoveredClusterManager(); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #hasDiscoveredClusterManager()} */ + @Deprecated public boolean hasDiscoveredMaster() { - return clusterStateHealth.hasDiscoveredMaster(); + return hasDiscoveredClusterManager(); } public int getNumberOfPendingTasks() { @@ -385,8 +391,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(TIMED_OUT, isTimedOut()); builder.field(NUMBER_OF_NODES, getNumberOfNodes()); builder.field(NUMBER_OF_DATA_NODES, getNumberOfDataNodes()); - builder.field(DISCOVERED_MASTER, hasDiscoveredMaster()); // the field will be removed in a future 
major version - builder.field(DISCOVERED_CLUSTER_MANAGER, hasDiscoveredMaster()); + builder.field(DISCOVERED_MASTER, hasDiscoveredClusterManager()); // the field will be removed in a future major version + builder.field(DISCOVERED_CLUSTER_MANAGER, hasDiscoveredClusterManager()); builder.field(ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards()); builder.field(ACTIVE_SHARDS, getActiveShards()); builder.field(RELOCATING_SHARDS, getRelocatingShards()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 3e3d373ff3b31..a15fc15ef6a2b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -221,7 +221,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.trace( "stopped being cluster-manager while waiting for events with priority [{}]. retrying.", request.waitForEvents() diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 25513e6c9d7da..f3c5d51e1907a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -126,14 +126,14 @@ public TransportCleanupRepositoryAction( // We add a state applier that will remove any dangling repository cleanup actions on cluster-manager failover. // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes. 
- if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { + if (DiscoveryNode.isClusterManagerNode(clusterService.getSettings())) { addClusterStateApplier(clusterService); } } private static void addClusterStateApplier(ClusterService clusterService) { clusterService.addStateApplier(event -> { - if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) { + if (event.localNodeClusterManager() && event.previousState().nodes().isLocalNodeElectedClusterManager() == false) { final RepositoryCleanupInProgress repositoryCleanupInProgress = event.state() .custom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY); if (repositoryCleanupInProgress.hasCleanupInProgress() == false) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 0799a8bd22b45..47024e98f82b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -163,7 +163,7 @@ private void reroute(final boolean updateSettingsAcked) { // We're about to send a second update task, so we need to check if we're still the elected cluster-manager // For example the minimum_master_node could have been breached and we're no longer elected cluster-manager, // so we should *not* execute the reroute. - if (!clusterService.state().nodes().isLocalNodeElectedMaster()) { + if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { logger.debug("Skipping reroute after cluster update settings, because node is no longer cluster-manager"); listener.onResponse( new ClusterUpdateSettingsResponse( @@ -201,7 +201,7 @@ protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.debug( "failed to perform reroute after cluster settings were updated - current node is no longer a cluster-manager" ); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index ec2697fbd7339..89cd112d30c79 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -129,7 +129,7 @@ private static String getClusterManagerNodeId(ClusterState clusterState) { } DiscoveryNodes nodes = clusterState.getNodes(); if (nodes != null) { - return nodes.getMasterNodeId(); + return nodes.getClusterManagerNodeId(); } else { return null; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index b2e09629fc501..a30b6fd580f1a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -127,7 +127,7 @@ protected void masterOperation( final Predicate acceptableClusterStateOrNotMasterPredicate = request.local() ?
acceptableClusterStatePredicate - : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedMaster() == false); + : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false); if (acceptableClusterStatePredicate.test(state)) { ActionListener.completeWith(listener, () -> buildResponse(request, state)); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 984735a648dc7..a13932e137ab0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -197,7 +197,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq } ClusterHealthStatus clusterStatus = null; - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { + if (clusterService.state().nodes().isLocalNodeElectedClusterManager()) { clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index a75b1877a6b11..80f008072b024 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -108,7 +108,7 @@ protected void masterOperation( final ActionListener listener ) { indexTemplateService.removeTemplates( - new MetadataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.clusterManagerNodeTimeout()), + new MetadataIndexTemplateService.RemoveRequest(request.name()).clusterManagerTimeout(request.clusterManagerNodeTimeout()), new MetadataIndexTemplateService.RemoveListener() { @Override public void onResponse(MetadataIndexTemplateService.RemoveResponse response) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index fbc7ea6e78af5..8e8bcbab6f2b7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -125,7 +125,7 @@ protected void masterOperation( .mappings(request.mappings()) .aliases(request.aliases()) .create(request.create()) - .masterTimeout(request.clusterManagerNodeTimeout()) + .clusterManagerTimeout(request.clusterManagerNodeTimeout()) .version(request.version()), new MetadataIndexTemplateService.PutListener() { diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index f1c3e56b02eca..56fb688290002 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -169,7 +169,7 @@ protected void dispatchedShardOperationOnPrimary( performOnPrimary(request, primary, 
updateHelper, threadPool::absoluteTimeInMillis, (update, shardId, mappingListener) -> { assert update != null; assert shardId != null; - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), update, mappingListener); + mappingUpdatedAction.updateMappingOnClusterManager(shardId.getIndex(), update, mappingListener); }, mappingUpdateListener -> observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { @@ -626,7 +626,7 @@ private static Engine.Result performOpOnReplica( } if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { // Even though the primary waits on all nodes to ack the mapping changes to the cluster-manager - // (see MappingUpdatedAction.updateMappingOnMaster) we still need to protect against missing mappings + // (see MappingUpdatedAction.updateMappingOnClusterManager) we still need to protect against missing mappings // and wait for them. The reason is concurrent requests. Request r1 which has new field f triggers a // mapping update. Assume that that update is first applied on the primary, and only later on the replica // (it’s happening concurrently). Request r2, which now arrives on the primary and which also has the new diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 05b2f5eb168d8..a30189a0899a4 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -165,7 +165,7 @@ class AsyncSingleAction { protected void doStart(ClusterState clusterState) { try { final DiscoveryNodes nodes = clusterState.nodes(); - if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { + if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) { // check for block, if blocked, retry, else, execute locally final ClusterBlockException blockException = checkBlock(request, clusterState); if (blockException != null) { @@ -204,11 +204,11 @@ protected void doStart(ClusterState clusterState) { .execute(ActionRunnable.wrap(delegate, l -> masterOperation(task, request, clusterState, l))); } } else { - if (nodes.getMasterNode() == null) { + if (nodes.getClusterManagerNode() == null) { logger.debug("no known cluster-manager node, scheduling a retry"); retryOnMasterChange(clusterState, null); } else { - DiscoveryNode clusterManagerNode = nodes.getMasterNode(); + DiscoveryNode clusterManagerNode = nodes.getClusterManagerNode(); final String actionName = getClusterManagerActionName(clusterManagerNode); transportService.sendRequest( clusterManagerNode, @@ -225,7 +225,7 @@ public void handleException(final TransportException exp) { "connection exception while trying to forward request with action name [{}] to " + "master node [{}], scheduling a retry. 
Error: [{}]", actionName, - nodes.getMasterNode(), + nodes.getClusterManagerNode(), exp.getDetailedMessage() ); retryOnMasterChange(clusterState, cause); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index dd4f6c59fabaa..7e5de435267b4 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -213,8 +213,18 @@ public boolean blocksChanged() { /** * Returns true iff the local node is the mater node of the cluster. */ + public boolean localNodeClusterManager() { + return state.nodes().isLocalNodeElectedClusterManager(); + } + + /** + * Returns true iff the local node is the mater node of the cluster. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #localNodeClusterManager()} + */ + @Deprecated public boolean localNodeMaster() { - return state.nodes().isLocalNodeElectedMaster(); + return localNodeClusterManager(); } /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java index ecb99e06f3ef0..b5c65dacb9542 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterManagerNodeChangePredicate.java @@ -53,10 +53,10 @@ private ClusterManagerNodeChangePredicate() { */ public static Predicate build(ClusterState currentState) { final long currentVersion = currentState.version(); - final DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); + final DiscoveryNode clusterManagerNode = currentState.nodes().getClusterManagerNode(); final String currentMasterId = clusterManagerNode == null ? null : clusterManagerNode.getEphemeralId(); return newState -> { - final DiscoveryNode newClusterManager = newState.nodes().getMasterNode(); + final DiscoveryNode newClusterManager = newState.nodes().getClusterManagerNode(); final boolean accept; if (newClusterManager == null) { accept = false; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 5431bbdbeaf38..491166639b086 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -404,8 +404,8 @@ public String toString() { * In essence that means that all the changes from the other cluster state are also reflected by the current one */ public boolean supersedes(ClusterState other) { - return this.nodes().getMasterNodeId() != null - && this.nodes().getMasterNodeId().equals(other.nodes().getMasterNodeId()) + return this.nodes().getClusterManagerNodeId() != null + && this.nodes().getClusterManagerNodeId().equals(other.nodes().getClusterManagerNodeId()) && this.version() > other.version(); } @@ -485,12 +485,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } if (metrics.contains(Metric.MASTER_NODE)) { - builder.field("master_node", nodes().getMasterNodeId()); + builder.field("master_node", nodes().getClusterManagerNodeId()); } // Value of the field is identical with the above, and aims to replace the above field. 
if (metrics.contains(Metric.CLUSTER_MANAGER_NODE)) { - builder.field("cluster_manager_node", nodes().getMasterNodeId()); + builder.field("cluster_manager_node", nodes().getClusterManagerNodeId()); } if (metrics.contains(Metric.BLOCKS)) { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java index 7ab5785bac350..7945afd120350 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateObserver.java @@ -311,7 +311,7 @@ private static class StoredState { private final long version; StoredState(ClusterState clusterState) { - this.clusterManagerNodeId = clusterState.nodes().getMasterNodeId(); + this.clusterManagerNodeId = clusterState.nodes().getClusterManagerNodeId(); this.version = clusterState.version(); } @@ -320,7 +320,7 @@ private static class StoredState { * */ public boolean isOlderOrDifferentClusterManager(ClusterState clusterState) { return version < clusterState.version() - || Objects.equals(clusterManagerNodeId, clusterState.nodes().getMasterNodeId()) == false; + || Objects.equals(clusterManagerNodeId, clusterState.nodes().getClusterManagerNodeId()) == false; } } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 00e58d88d8798..976019ae77d6c 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -52,10 +52,20 @@ public interface ClusterStateTaskExecutor { /** * indicates whether this executor should only run if the current node is cluster-manager */ - default boolean runOnlyOnMaster() { + default boolean runOnlyOnClusterManager() { return true; } + /** + * indicates whether this executor should only run if the current node is cluster-manager + * + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #runOnlyOnClusterManager()} + */ + @Deprecated + default boolean runOnlyOnMaster() { + return runOnlyOnClusterManager(); + } + /** * Callback invoked after new cluster state is published. Note that * this method is not invoked if the cluster state was not updated. diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 74cc118892579..37108d356d86a 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -51,10 +51,21 @@ public interface ClusterStateTaskListener { * called when the task was rejected because the local node is no longer cluster-manager. * Used only for tasks submitted to {@link MasterService}. */ - default void onNoLongerMaster(String source) { + default void onNoLongerClusterManager(String source) { onFailure(source, new NotClusterManagerException("no longer cluster-manager. source: [" + source + "]")); } + /** + * called when the task was rejected because the local node is no longer cluster-manager. + * Used only for tasks submitted to {@link MasterService}. 
+ * + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #onNoLongerClusterManager(String)} + */ + @Deprecated + default void onNoLongerMaster(String source) { + onNoLongerClusterManager(source); + } + /** * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} have been processed * properly by all listeners. diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index f9de49a1f7e58..9225914a931b2 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -107,7 +107,7 @@ public Priority priority() { * For local requests, use {@link LocalClusterUpdateTask} instead. */ @Override - public final boolean runOnlyOnMaster() { + public final boolean runOnlyOnClusterManager() { return true; } } diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 97b6f81f35449..5d0ba2c67e961 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -151,14 +151,14 @@ void setUpdateFrequency(TimeValue updateFrequency) { @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster() && refreshAndRescheduleRunnable.get() == null) { + if (event.localNodeClusterManager() && refreshAndRescheduleRunnable.get() == null) { logger.trace("elected as cluster-manager, scheduling cluster info update tasks"); executeRefresh(event.state(), "became cluster-manager"); final RefreshAndRescheduleRunnable newRunnable = new RefreshAndRescheduleRunnable(); refreshAndRescheduleRunnable.set(newRunnable); threadPool.scheduleUnlessShuttingDown(updateFrequency, REFRESH_EXECUTOR, newRunnable); - } else if (event.localNodeMaster() == false) { + } else if (event.localNodeClusterManager() == false) { refreshAndRescheduleRunnable.set(null); return; } diff --git a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java index dfa02b60ee9dc..d730be1f2afb6 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/LocalClusterUpdateTask.java @@ -91,7 +91,7 @@ public Priority priority() { } @Override - public final boolean runOnlyOnMaster() { + public final boolean runOnlyOnClusterManager() { return false; } } diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index 42a6438ff125e..c86aa00a6f2a2 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -51,8 +51,8 @@ public interface LocalNodeClusterManagerListener extends ClusterStateListener { @Override default void clusterChanged(ClusterChangedEvent event) { - final boolean wasClusterManager = event.previousState().nodes().isLocalNodeElectedMaster(); - final boolean isClusterManager = event.localNodeMaster(); + final boolean wasClusterManager = event.previousState().nodes().isLocalNodeElectedClusterManager(); + final boolean isClusterManager = 
event.localNodeClusterManager(); if (wasClusterManager == false && isClusterManager) { onMaster(); } else if (wasClusterManager && isClusterManager == false) { diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index 2c4eff5f6d00a..c1c49e1018197 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -111,7 +111,7 @@ public void setClient(Client client) { * {@code timeout} is the cluster-manager node timeout ({@link ClusterManagerNodeRequest#clusterManagerNodeTimeout()}), * potentially waiting for a cluster-manager node to be available. */ - public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { + public void updateMappingOnClusterManager(Index index, Mapping mappingUpdate, ActionListener listener) { final RunOnce release = new RunOnce(() -> semaphore.release()); try { @@ -132,6 +132,19 @@ public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionList } } + /** + * Update mappings on the cluster-manager node, waiting for the change to be committed, + * but not for the mapping update to be applied on all nodes. The timeout specified by + * {@code timeout} is the cluster-manager node timeout ({@link ClusterManagerNodeRequest#clusterManagerNodeTimeout()}), + * potentially waiting for a cluster-manager node to be available. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #updateMappingOnClusterManager(Index, Mapping, ActionListener)} + */ + @Deprecated + public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { + updateMappingOnClusterManager(index, mappingUpdate, listener); + } + // used by tests int blockedThreads() { return semaphore.getQueueLength(); diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index 81a365df9866d..002c5fd3b89db 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -39,12 +39,12 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskListener; -import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; import org.opensearch.cluster.metadata.IndexMetadata; @@ -182,7 +182,7 @@ private void sendShardAction( final ActionListener listener ) { ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); - DiscoveryNode clusterManagerNode = currentState.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = currentState.nodes().getClusterManagerNode(); Predicate changePredicate = 
ClusterManagerNodeChangePredicate.build(currentState); if (clusterManagerNode == null) { logger.warn("no cluster-manager known for action [{}] for shard entry [{}]", actionName, request); @@ -385,7 +385,7 @@ public void onFailure(String source, Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.error("{} no longer cluster-manager while failing shard [{}]", request.shardId, request); try { channel.sendResponse(new NotClusterManagerException(source)); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java index cdf673c00fe56..addd2b3e46597 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java @@ -134,7 +134,7 @@ public ClusterBootstrapService( + "]" ); } - if (DiscoveryNode.isMasterNode(settings) == false) { + if (DiscoveryNode.isClusterManagerNode(settings) == false) { throw new IllegalArgumentException( "node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() @@ -176,7 +176,7 @@ public static boolean discoveryIsConfigured(Settings settings) { void onFoundPeersUpdated() { final Set nodes = getDiscoveredNodes(); if (bootstrappingPermitted.get() - && transportService.getLocalNode().isMasterNode() + && transportService.getLocalNode().isClusterManagerNode() && bootstrapRequirements.isEmpty() == false && isBootstrappedSupplier.getAsBoolean() == false && nodes.stream().noneMatch(Coordinator::isZen1Node)) { @@ -219,7 +219,7 @@ void scheduleUnconfiguredBootstrap() { return; } - if (transportService.getLocalNode().isMasterNode() == false) { + if (transportService.getLocalNode().isClusterManagerNode() == false) { return; } @@ -257,7 +257,7 @@ private Set getDiscoveredNodes() { } private void startBootstrap(Set discoveryNodes, List unsatisfiedRequirements) { - assert discoveryNodes.stream().allMatch(DiscoveryNode::isMasterNode) : discoveryNodes; + assert discoveryNodes.stream().allMatch(DiscoveryNode::isClusterManagerNode) : discoveryNodes; assert discoveryNodes.stream().noneMatch(Coordinator::isZen1Node) : discoveryNodes; assert unsatisfiedRequirements.size() < discoveryNodes.size() : discoveryNodes + " smaller than " + unsatisfiedRequirements; if (bootstrappingPermitted.compareAndSet(true, false)) { @@ -277,7 +277,7 @@ public static boolean isBootstrapPlaceholder(String nodeId) { } private void doBootstrap(VotingConfiguration votingConfiguration) { - assert transportService.getLocalNode().isMasterNode(); + assert transportService.getLocalNode().isClusterManagerNode(); try { votingConfigurationConsumer.accept(votingConfiguration); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java index e93eddc02337e..434bd5e562c4b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -184,9 +184,10 @@ String getDescription() { if (statusInfo.getStatus() == UNHEALTHY) { return String.format(Locale.ROOT, "this node is unhealthy: %s", statusInfo.getInfo()); } - final List clusterStateNodes = 
StreamSupport.stream(clusterState.nodes().getMasterNodes().values().spliterator(), false) - .map(n -> n.value.toString()) - .collect(Collectors.toList()); + final List clusterStateNodes = StreamSupport.stream( + clusterState.nodes().getClusterManagerNodes().values().spliterator(), + false + ).map(n -> n.value.toString()).collect(Collectors.toList()); final String discoveryWillContinueDescription = String.format( Locale.ROOT, @@ -206,7 +207,7 @@ String getDescription() { discoveryWillContinueDescription ); - if (clusterState.nodes().getLocalNode().isMasterNode() == false) { + if (clusterState.nodes().getLocalNode().isClusterManagerNode() == false) { return String.format(Locale.ROOT, "cluster-manager not discovered yet: %s", discoveryStateIgnoringQuorum); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index fcae32af9cad5..08cd7d0ab02db 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -413,7 +413,7 @@ public PublishResponse handlePublishRequest(PublishRequest publishRequest) { } if (clusterState.term() == getLastAcceptedTerm() && clusterState.version() <= getLastAcceptedVersion()) { if (clusterState.term() == ZEN1_BWC_TERM - && clusterState.nodes().getMasterNode().equals(getLastAcceptedState().nodes().getMasterNode()) == false) { + && clusterState.nodes().getClusterManagerNode().equals(getLastAcceptedState().nodes().getClusterManagerNode()) == false) { logger.debug( "handling publish request in compatibility mode despite version mismatch (expected: >[{}], actual: [{}])", getLastAcceptedVersion(), @@ -652,7 +652,7 @@ public static class VoteCollection { private final Set joins; public boolean addVote(DiscoveryNode sourceNode) { - return sourceNode.isMasterNode() && nodes.put(sourceNode.getId(), sourceNode) == null; + return sourceNode.isClusterManagerNode() && nodes.put(sourceNode.getId(), sourceNode) == null; } public boolean addJoinVote(Join join) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 56d585d81e9dc..14f9cca3630fe 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -386,7 +386,7 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { + getLocalNode(); synchronized (mutex) { - final DiscoveryNode sourceNode = publishRequest.getAcceptedState().nodes().getMasterNode(); + final DiscoveryNode sourceNode = publishRequest.getAcceptedState().nodes().getClusterManagerNode(); logger.trace("handlePublishRequest: handling [{}] from [{}]", publishRequest, sourceNode); if (sourceNode.equals(getLocalNode()) && mode != Mode.LEADER) { @@ -515,10 +515,11 @@ private void startElection() { private void abdicateTo(DiscoveryNode newClusterManager) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; - assert newClusterManager.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newClusterManager; + assert newClusterManager.isClusterManagerNode() : "should only abdicate to cluster-manager-eligible node but was " + + newClusterManager; final StartJoinRequest startJoinRequest = new 
StartJoinRequest(newClusterManager, Math.max(getCurrentTerm(), maxTermSeen) + 1); logger.info("abdicating to {} with term {}", newClusterManager, startJoinRequest.getTerm()); - getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> { + getLastAcceptedState().nodes().clusterManagersFirstStream().forEach(node -> { if (isZen1Node(node) == false) { joinHelper.sendStartJoinRequest(startJoinRequest, node); } @@ -568,7 +569,7 @@ private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) { private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) { assert Thread.holdsLock(mutex) == false; - assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not cluster-manager-eligible"; + assert getLocalNode().isClusterManagerNode() : getLocalNode() + " received a join but is not cluster-manager-eligible"; logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest); if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) { @@ -587,7 +588,7 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback transportService.connectToNode(joinRequest.getSourceNode(), ActionListener.wrap(ignore -> { final ClusterState stateForJoinValidation = getStateForClusterManagerService(); - if (stateForJoinValidation.nodes().isLocalNodeElectedMaster()) { + if (stateForJoinValidation.nodes().isLocalNodeElectedClusterManager()) { onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { // we do this in a couple of places including the cluster update thread. This one here is really just best effort @@ -676,7 +677,7 @@ void becomeCandidate(String method) { cleanClusterManagerService(); } - if (applierState.nodes().getMasterNodeId() != null) { + if (applierState.nodes().getClusterManagerNodeId() != null) { applierState = clusterStateWithNoClusterManagerBlock(applierState); clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, (source, e) -> {}); } @@ -688,7 +689,7 @@ void becomeCandidate(String method) { void becomeLeader(String method) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; assert mode == Mode.CANDIDATE : "expected candidate but was " + mode; - assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not cluster-manager-eligible"; + assert getLocalNode().isClusterManagerNode() : getLocalNode() + " became a leader but is not cluster-manager-eligible"; logger.debug( "{}: coordinator becoming LEADER in term {} (was {}, lastKnownLeader was [{}])", @@ -714,7 +715,7 @@ void becomeLeader(String method) { void becomeFollower(String method, DiscoveryNode leaderNode) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; - assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not cluster-manager-eligible"; + assert leaderNode.isClusterManagerNode() : leaderNode + " became a leader but is not cluster-manager-eligible"; assert mode != Mode.LEADER : "do not switch to follower from leader (should be candidate first)"; if (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) { @@ -765,7 +766,7 @@ public void onFailure(String source, Exception e) { @Override public ClusterTasksResult execute(ClusterState currentState) { - if (currentState.nodes().isLocalNodeElectedMaster() == false) { + if (currentState.nodes().isLocalNodeElectedClusterManager() 
== false) { allocationService.cleanCaches(); } return unchanged(); @@ -884,7 +885,8 @@ public void invariant() { assert peerFinder.getCurrentTerm() == getCurrentTerm(); assert followersChecker.getFastResponseState().term == getCurrentTerm() : followersChecker.getFastResponseState(); assert followersChecker.getFastResponseState().mode == getMode() : followersChecker.getFastResponseState(); - assert (applierState.nodes().getMasterNodeId() == null) == applierState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); + assert (applierState.nodes().getClusterManagerNodeId() == null) == applierState.blocks() + .hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse(); assert lagDetector.getTrackedNodes().contains(getLocalNode()) == false : lagDetector.getTrackedNodes(); @@ -901,11 +903,11 @@ public void invariant() { assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; - assert becomingClusterManager || getStateForClusterManagerService().nodes().getMasterNodeId() != null + assert becomingClusterManager || getStateForClusterManagerService().nodes().getClusterManagerNodeId() != null : getStateForClusterManagerService(); assert leaderChecker.leader() == null : leaderChecker.leader(); - assert getLocalNode().equals(applierState.nodes().getMasterNode()) - || (applierState.nodes().getMasterNodeId() == null && applierState.term() < getCurrentTerm()); + assert getLocalNode().equals(applierState.nodes().getClusterManagerNode()) + || (applierState.nodes().getClusterManagerNodeId() == null && applierState.term() < getCurrentTerm()); assert preVoteCollector.getLeader() == getLocalNode() : preVoteCollector; assert clusterFormationFailureHelper.isRunning() == false; @@ -946,12 +948,12 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert peerFinderLeader.equals(lastKnownLeader) : peerFinderLeader; assert electionScheduler == null : electionScheduler; assert prevotingRound == null : prevotingRound; - assert getStateForClusterManagerService().nodes().getMasterNodeId() == null : getStateForClusterManagerService(); + assert getStateForClusterManagerService().nodes().getClusterManagerNodeId() == null : getStateForClusterManagerService(); assert leaderChecker.currentNodeIsClusterManager() == false; assert lastKnownLeader.equals(Optional.of(leaderChecker.leader())); assert followersChecker.getKnownFollowers().isEmpty(); - assert lastKnownLeader.get().equals(applierState.nodes().getMasterNode()) - || (applierState.nodes().getMasterNodeId() == null + assert lastKnownLeader.get().equals(applierState.nodes().getClusterManagerNode()) + || (applierState.nodes().getClusterManagerNodeId() == null && (applierState.term() < getCurrentTerm() || applierState.version() < getLastAcceptedState().version())); assert currentPublication.map(Publication::isCommitted).orElse(true); assert preVoteCollector.getLeader().equals(lastKnownLeader.get()) : preVoteCollector; @@ -961,11 +963,11 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert joinAccumulator instanceof JoinHelper.CandidateJoinAccumulator; assert peerFinderLeader.isPresent() == false : peerFinderLeader; assert prevotingRound == null || electionScheduler != null; - assert getStateForClusterManagerService().nodes().getMasterNodeId() == null : getStateForClusterManagerService(); + assert 
getStateForClusterManagerService().nodes().getClusterManagerNodeId() == null : getStateForClusterManagerService(); assert leaderChecker.currentNodeIsClusterManager() == false; assert leaderChecker.leader() == null : leaderChecker.leader(); assert followersChecker.getKnownFollowers().isEmpty(); - assert applierState.nodes().getMasterNodeId() == null; + assert applierState.nodes().getClusterManagerNodeId() == null; assert currentPublication.map(Publication::isCommitted).orElse(true); assert preVoteCollector.getLeader() == null : preVoteCollector; assert clusterFormationFailureHelper.isRunning(); @@ -993,7 +995,7 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura return false; } - if (getLocalNode().isMasterNode() == false) { + if (getLocalNode().isClusterManagerNode() == false) { logger.debug("skip setting initial configuration as local node is not a cluster-manager-eligible node"); throw new CoordinationStateRejectedException( "this node is not cluster-manager-eligible, but cluster bootstrapping can only happen on a cluster-manager-eligible node" @@ -1060,14 +1062,14 @@ ClusterState improveConfiguration(ClusterState clusterState) { // the logging much harder to follow. final Stream clusterManagerIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false) .filter( - n -> n.isMasterNode() == false + n -> n.isClusterManagerNode() == false && (clusterState.getLastAcceptedConfiguration().getNodeIds().contains(n.getId()) || clusterState.getLastCommittedConfiguration().getNodeIds().contains(n.getId())) ) .map(DiscoveryNode::getId); final Set liveNodes = StreamSupport.stream(clusterState.nodes().spliterator(), false) - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .filter(coordinationState.get()::containsJoinVoteFor) .filter(discoveryNode -> isZen1Node(discoveryNode) == false) .collect(Collectors.toSet()); @@ -1108,7 +1110,8 @@ static boolean validVotingConfigExclusionState(ClusterState clusterState) { .map(VotingConfigExclusion::getNodeId) .collect(Collectors.toSet()); for (DiscoveryNode node : clusterState.getNodes()) { - if (node.isMasterNode() && (nodeIdsWithAbsentName.contains(node.getId()) || nodeNamesWithAbsentId.contains(node.getName()))) { + if (node.isClusterManagerNode() + && (nodeIdsWithAbsentName.contains(node.getId()) || nodeNamesWithAbsentId.contains(node.getName()))) { return false; } } @@ -1146,7 +1149,7 @@ public void onFailure(String source, Exception e) { // exposed for tests boolean missingJoinVoteFrom(DiscoveryNode node) { - return node.isMasterNode() && coordinationState.get().containsJoinVoteFor(node) == false; + return node.isClusterManagerNode() && coordinationState.get().containsJoinVoteFor(node) == false; } private void handleJoin(Join join) { @@ -1216,7 +1219,7 @@ ClusterState getStateForClusterManagerService() { } private ClusterState clusterStateWithNoClusterManagerBlock(ClusterState clusterState) { - if (clusterState.nodes().getMasterNodeId() != null) { + if (clusterState.nodes().getClusterManagerNodeId() != null) { // remove block if it already exists before adding new one assert clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false : "NO_MASTER_BLOCK should only be added by Coordinator"; @@ -1224,7 +1227,7 @@ private ClusterState clusterStateWithNoClusterManagerBlock(ClusterState clusterS .blocks(clusterState.blocks()) .addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) .build(); - final DiscoveryNodes discoveryNodes = new 
DiscoveryNodes.Builder(clusterState.nodes()).masterNodeId(null).build(); + final DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(clusterState.nodes()).clusterManagerNodeId(null).build(); return ClusterState.builder(clusterState).blocks(clusterBlocks).nodes(discoveryNodes).build(); } else { return clusterState; @@ -1425,7 +1428,7 @@ protected void onFoundPeersUpdated() { private void startElectionScheduler() { assert electionScheduler == null : electionScheduler; - if (getLocalNode().isMasterNode() == false) { + if (getLocalNode().isClusterManagerNode() == false) { return; } @@ -1640,7 +1643,7 @@ public void onSuccess(String source) { final ClusterState state = getLastAcceptedState(); // committed state if (localNodeMayWinElection(state) == false) { final List clusterManagerCandidates = completedNodes().stream() - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .filter(node -> nodeMayWinElection(state, node)) .filter(node -> { // check if cluster_manager candidate would be able to get an election quorum if we were diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index c9a9ba9af09cd..e69a78eae041d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -168,7 +168,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.keySet().removeIf(isUnknownNode); faultyNodes.removeIf(isUnknownNode); - discoveryNodes.mastersFirstStream().forEach(discoveryNode -> { + discoveryNodes.clusterManagersFirstStream().forEach(discoveryNode -> { if (discoveryNode.equals(discoveryNodes.getLocalNode()) == false && followerCheckers.containsKey(discoveryNode) == false && faultyNodes.contains(discoveryNode) == false) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 3accc4f1d5baf..2eae6411eff69 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -156,16 +156,20 @@ public ClusterTasksResult execute(ClusterState currentSta + term + "), there is a newer cluster-manager" ); - } else if (currentState.nodes().getMasterNodeId() == null && joiningTasks.stream().anyMatch(Task::isBecomeMasterTask)) { - assert currentState.term() < term : "there should be at most one become cluster-manager task per election (= by term)"; - final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(currentState.coordinationMetadata()) - .term(term) - .build(); - final Metadata metadata = Metadata.builder(currentState.metadata()).coordinationMetadata(coordinationMetadata).build(); - currentState = ClusterState.builder(currentState).metadata(metadata).build(); - } else if (currentState.nodes().isLocalNodeElectedMaster()) { - assert currentState.term() == term : "term should be stable for the same cluster-manager"; - } + } else if (currentState.nodes().getClusterManagerNodeId() == null + && joiningTasks.stream().anyMatch(Task::isBecomeClusterManagerTask)) { + assert currentState.term() < term + : "there should be at most one become cluster-manager task per election (= by term)"; + final CoordinationMetadata coordinationMetadata = 
CoordinationMetadata.builder(currentState.coordinationMetadata()) + .term(term) + .build(); + final Metadata metadata = Metadata.builder(currentState.metadata()) + .coordinationMetadata(coordinationMetadata) + .build(); + currentState = ClusterState.builder(currentState).metadata(metadata).build(); + } else if (currentState.nodes().isLocalNodeElectedClusterManager()) { + assert currentState.term() == term : "term should be stable for the same cluster-manager"; + } return super.execute(currentState, joiningTasks); } @@ -312,7 +316,7 @@ void logLastFailedJoinAttempt() { } public void sendJoinRequest(DiscoveryNode destination, long term, Optional optionalJoin, Runnable onCompletion) { - assert destination.isMasterNode() : "trying to join cluster-manager-ineligible " + destination; + assert destination.isClusterManagerNode() : "trying to join cluster-manager-ineligible " + destination; final StatusInfo statusInfo = nodeHealthService.getHealth(); if (statusInfo.getStatus() == UNHEALTHY) { logger.debug("dropping join request to [{}]: [{}]", destination, statusInfo.getInfo()); @@ -363,7 +367,7 @@ public String executor() { } public void sendStartJoinRequest(final StartJoinRequest startJoinRequest, final DiscoveryNode destination) { - assert startJoinRequest.getSourceNode().isMasterNode() : "sending start-join request for cluster-manager-ineligible " + assert startJoinRequest.getSourceNode().isClusterManagerNode() : "sending start-join request for cluster-manager-ineligible " + startJoinRequest.getSourceNode(); transportService.sendRequest(destination, START_JOIN_ACTION_NAME, startJoinRequest, new TransportResponseHandler() { @Override @@ -536,7 +540,7 @@ public void close(Mode newMode) { final String stateUpdateSource = "elected-as-cluster-manager ([" + pendingAsTasks.size() + "] nodes joined)"; - pendingAsTasks.put(JoinTaskExecutor.newBecomeMasterTask(), (source, e) -> {}); + pendingAsTasks.put(JoinTaskExecutor.newBecomeClusterManagerTask(), (source, e) -> {}); pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {}); joinTaskExecutor = joinTaskExecutorGenerator.get(); masterService.submitStateUpdateTasks( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 629a016e0eab1..7582946bb8f3f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -103,10 +103,16 @@ public String toString() { return node != null ? 
node + " " + reason : reason; } - public boolean isBecomeMasterTask() { + public boolean isBecomeClusterManagerTask() { return reason.equals(BECOME_MASTER_TASK_REASON) || reason.equals(BECOME_CLUSTER_MANAGER_TASK_REASON); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isBecomeClusterManagerTask()} */ + @Deprecated + public boolean isBecomeMasterTask() { + return isBecomeClusterManagerTask(); + } + public boolean isFinishElectionTask() { return reason.equals(FINISH_ELECTION_TASK_REASON); } @@ -143,7 +149,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (joiningNodes.size() == 1 && joiningNodes.get(0).isFinishElectionTask()) { return results.successes(joiningNodes).build(currentState); - } else if (currentNodes.getMasterNode() == null && joiningNodes.stream().anyMatch(Task::isBecomeMasterTask)) { + } else if (currentNodes.getClusterManagerNode() == null && joiningNodes.stream().anyMatch(Task::isBecomeClusterManagerTask)) { assert joiningNodes.stream().anyMatch(Task::isFinishElectionTask) : "becoming a cluster-manager but election is not finished " + joiningNodes; // use these joins to try and become the cluster-manager. @@ -151,10 +157,10 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // during the cluster state publishing guarantees that we have enough newState = becomeClusterManagerAndTrimConflictingNodes(currentState, joiningNodes); nodesChanged = true; - } else if (currentNodes.isLocalNodeElectedMaster() == false) { + } else if (currentNodes.isLocalNodeElectedClusterManager() == false) { logger.trace( "processing node joins, but we are not the cluster-manager. current cluster-manager: {}", - currentNodes.getMasterNode() + currentNodes.getClusterManagerNode() ); throw new NotClusterManagerException("Node [" + currentNodes.getLocalNode() + "] not cluster-manager for join request"); } else { @@ -163,7 +169,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); - assert nodesBuilder.isLocalNodeElectedMaster(); + assert nodesBuilder.isLocalNodeElectedClusterManager(); Version minClusterNodeVersion = newState.nodes().getMinNodeVersion(); Version maxClusterNodeVersion = newState.nodes().getMaxNodeVersion(); @@ -172,7 +178,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // processing any joins Map joiniedNodeNameIds = new HashMap<>(); for (final Task joinTask : joiningNodes) { - if (joinTask.isBecomeMasterTask() || joinTask.isFinishElectionTask()) { + if (joinTask.isBecomeClusterManagerTask() || joinTask.isFinishElectionTask()) { // noop } else if (currentNodes.nodeExistsWithSameRoles(joinTask.node()) && !currentNodes.nodeExistsWithBWCVersion(joinTask.node())) { logger.debug("received a join request for an existing node [{}]", joinTask.node()); @@ -190,7 +196,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); - if (node.isMasterNode()) { + if (node.isClusterManagerNode()) { joiniedNodeNameIds.put(node.getName(), node.getId()); } } catch (IllegalArgumentException | IllegalStateException e) { @@ -245,13 +251,13 @@ public ClusterTasksResult execute(ClusterState currentState, List jo } protected ClusterState.Builder becomeClusterManagerAndTrimConflictingNodes(ClusterState 
currentState, List joiningNodes) { - assert currentState.nodes().getMasterNodeId() == null : currentState; + assert currentState.nodes().getClusterManagerNodeId() == null : currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); - nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); + nodesBuilder.clusterManagerNodeId(currentState.nodes().getLocalNodeId()); for (final Task joinTask : joiningNodes) { - if (joinTask.isBecomeMasterTask()) { + if (joinTask.isBecomeClusterManagerTask()) { refreshDiscoveryNodeVersionAfterUpgrade(currentNodes, nodesBuilder); } else if (joinTask.isFinishElectionTask()) { // no-op @@ -343,7 +349,7 @@ private void refreshDiscoveryNodeVersionAfterUpgrade(DiscoveryNodes currentNodes } @Override - public boolean runOnlyOnMaster() { + public boolean runOnlyOnClusterManager() { // we validate that we are allowed to change the cluster state during cluster state processing return false; } @@ -366,7 +372,7 @@ public static Task newBecomeClusterManagerTask() { /** * a task that is used to signal the election is stopped and we should process pending joins. - * it may be used in combination with {@link JoinTaskExecutor#newBecomeMasterTask()} + * it may be used in combination with {@link JoinTaskExecutor#newBecomeClusterManagerTask()} */ public static Task newFinishElectionTask() { return new Task(null, Task.FINISH_ELECTION_TASK_REASON); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index f1d2754fbbfa7..3d90c7366d2d6 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -193,7 +193,7 @@ void setCurrentNodes(DiscoveryNodes discoveryNodes) { // For assertions boolean currentNodeIsClusterManager() { - return discoveryNodes.isLocalNodeElectedMaster(); + return discoveryNodes.isLocalNodeElectedClusterManager(); } private void handleLeaderCheck(LeaderCheckRequest request) { @@ -209,7 +209,7 @@ private void handleLeaderCheck(LeaderCheckRequest request) { + "]"; logger.debug(message); throw new NodeHealthCheckFailureException(message); - } else if (discoveryNodes.isLocalNodeElectedMaster() == false) { + } else if (discoveryNodes.isLocalNodeElectedClusterManager() == false) { logger.debug("rejecting leader check on non-cluster-manager {}", request); throw new CoordinationStateRejectedException( "rejecting leader check from [" + request.getSender() + "] sent to a node that is no longer the cluster-manager" diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index 3a466d0bfe6dd..2cf0fb3bffeee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -136,7 +136,7 @@ public void onFailure(final String source, final Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.debug("no longer cluster-manager while processing node removal [{}]", source); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java 
b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index 8e2e6fde3a485..0b0d6f8e57174 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -76,10 +76,19 @@ public void writeTo(StreamOutput out) throws IOException { /** * @return the node that is currently leading, according to the responding node. */ - public Optional getMasterNode() { + public Optional getClusterManagerNode() { return clusterManagerNode; } + /** + * @return the node that is currently leading, according to the responding node. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNode()} + */ + @Deprecated + public Optional getMasterNode() { + return getClusterManagerNode(); + } + /** * @return the collection of known peers of the responding node, or an empty collection if the responding node believes there * is currently a leader. diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java index c85ea07591edc..429890e7420de 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java @@ -78,7 +78,10 @@ public Publication(PublishRequest publishRequest, AckListener ackListener, LongS startTime = currentTimeSupplier.getAsLong(); applyCommitRequest = Optional.empty(); publicationTargets = new ArrayList<>(publishRequest.getAcceptedState().getNodes().getNodes().size()); - publishRequest.getAcceptedState().getNodes().mastersFirstStream().forEach(n -> publicationTargets.add(new PublicationTarget(n))); + publishRequest.getAcceptedState() + .getNodes() + .clusterManagersFirstStream() + .forEach(n -> publicationTargets.add(new PublicationTarget(n))); } public void start(Set faultyNodes) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index c3b3b237a1944..8ef8124799bf0 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -232,7 +232,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque private PublishWithJoinResponse acceptState(ClusterState incomingState) { // if the state is coming from the current node, use original request instead (see currentPublishRequestToSelf for explanation) - if (transportService.getLocalNode().equals(incomingState.nodes().getMasterNode())) { + if (transportService.getLocalNode().equals(incomingState.nodes().getClusterManagerNode())) { final PublishRequest publishRequest = currentPublishRequestToSelf.get(); if (publishRequest == null || publishRequest.getAcceptedState().stateUUID().equals(incomingState.stateUUID()) == false) { throw new IllegalStateException("publication to self failed for " + publishRequest); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index 2d268b5cca779..1570a84ab871f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ 
-125,14 +125,14 @@ public VotingConfiguration reconfigure( ); final Set liveNodeIds = liveNodes.stream() - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .map(DiscoveryNode::getId) .collect(Collectors.toSet()); final Set currentConfigNodeIds = currentConfig.getNodeIds(); final Set orderedCandidateNodes = new TreeSet<>(); liveNodes.stream() - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .filter(n -> retiredNodeIds.contains(n.getId()) == false) .forEach( n -> orderedCandidateNodes.add( diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java index 8506a59b22763..229c20b8dea17 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java @@ -98,7 +98,7 @@ public class UnsafeBootstrapClusterManagerCommand extends OpenSearchNodeCommand protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.roles setting"); - Boolean clusterManager = DiscoveryNode.isMasterNode(settings); + Boolean clusterManager = DiscoveryNode.isClusterManagerNode(settings); if (clusterManager == false) { throw new OpenSearchException(NOT_CLUSTER_MANAGER_NODE_MSG); } diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java index f1fe680f80769..673695e217a5b 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java @@ -86,7 +86,7 @@ public ClusterStateHealth(final ClusterState clusterState) { public ClusterStateHealth(final ClusterState clusterState, final String[] concreteIndices) { numberOfNodes = clusterState.nodes().getSize(); numberOfDataNodes = clusterState.nodes().getDataNodes().size(); - hasDiscoveredClusterManager = clusterState.nodes().getMasterNodeId() != null; + hasDiscoveredClusterManager = clusterState.nodes().getClusterManagerNodeId() != null; indices = new HashMap<>(); for (String index : concreteIndices) { IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index); @@ -238,10 +238,16 @@ public double getActiveShardsPercent() { return activeShardsPercent; } - public boolean hasDiscoveredMaster() { + public boolean hasDiscoveredClusterManager() { return hasDiscoveredClusterManager; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #hasDiscoveredClusterManager()} */ + @Deprecated + public boolean hasDiscoveredMaster() { + return hasDiscoveredClusterManager(); + } + @Override public Iterator iterator() { return indices.values().iterator(); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 6dded44fe70bb..1a0f4f0f83e00 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -132,7 +132,7 @@ public void removeTemplates(final RemoveRequest request, final RemoveListener li 
@Override public TimeValue timeout() { - return request.masterTimeout; + return request.clusterManagerTimeout; } @Override @@ -860,7 +860,7 @@ public void putTemplate(final PutRequest request, final PutListener listener) { @Override public TimeValue timeout() { - return request.masterTimeout; + return request.clusterManagerTimeout; } @Override @@ -1526,6 +1526,10 @@ public static class PutRequest { String mappings = null; List aliases = new ArrayList<>(); + TimeValue clusterManagerTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; + + /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerTimeout} */ + @Deprecated TimeValue masterTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; public PutRequest(String cause, String name) { @@ -1563,11 +1567,17 @@ public PutRequest aliases(Set aliases) { return this; } - public PutRequest masterTimeout(TimeValue masterTimeout) { - this.masterTimeout = masterTimeout; + public PutRequest clusterManagerTimeout(TimeValue clusterManagerTimeout) { + this.clusterManagerTimeout = clusterManagerTimeout; return this; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerTimeout(TimeValue)} */ + @Deprecated + public PutRequest masterTimeout(TimeValue masterTimeout) { + return clusterManagerTimeout(masterTimeout); + } + public PutRequest version(Integer version) { this.version = version; return this; @@ -1598,16 +1608,26 @@ public boolean acknowledged() { */ public static class RemoveRequest { final String name; + TimeValue clusterManagerTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; + + /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerTimeout} */ + @Deprecated TimeValue masterTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; public RemoveRequest(String name) { this.name = name; } - public RemoveRequest masterTimeout(TimeValue masterTimeout) { - this.masterTimeout = masterTimeout; + public RemoveRequest clusterManagerTimeout(TimeValue clusterManagerTimeout) { + this.clusterManagerTimeout = clusterManagerTimeout; return this; } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerTimeout} */ + @Deprecated + public RemoveRequest masterTimeout(TimeValue masterTimeout) { + return clusterManagerTimeout(masterTimeout); + } } /** diff --git a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java index c708f3830f431..b1dd490a032b0 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java @@ -70,8 +70,8 @@ public SystemIndexMetadataUpgradeService(SystemIndices systemIndices, ClusterSer @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster() != clusterManager) { - this.clusterManager = event.localNodeMaster(); + if (event.localNodeClusterManager() != clusterManager) { + this.clusterManager = event.localNodeClusterManager(); } if (clusterManager && updateTaskPending == false) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java index 
01cadf3910267..51da1c73e57a4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java @@ -109,7 +109,7 @@ public TemplateUpgradeService( } return upgradedTemplates; }; - if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { + if (DiscoveryNode.isClusterManagerNode(clusterService.getSettings())) { clusterService.addListener(this); } } @@ -117,7 +117,7 @@ public TemplateUpgradeService( @Override public void clusterChanged(ClusterChangedEvent event) { ClusterState state = event.state(); - if (state.nodes().isLocalNodeElectedMaster() == false) { + if (state.nodes().isLocalNodeElectedClusterManager() == false) { return; } if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 0d55624a35998..beb69e7f1cfaa 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -74,7 +74,7 @@ public class DiscoveryNode implements Writeable, ToXContentFragment { public static boolean nodeRequiresLocalStorage(Settings settings) { boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings); - if (localStorageEnable == false && (isDataNode(settings) || isMasterNode(settings))) { + if (localStorageEnable == false && (isDataNode(settings) || isClusterManagerNode(settings))) { // TODO: make this a proper setting validation logic, requiring multi-settings validation throw new IllegalArgumentException("storage can not be disabled for cluster-manager and data nodes"); } @@ -96,10 +96,16 @@ public static boolean hasRole(final Settings settings, final DiscoveryNodeRole r } } - public static boolean isMasterNode(Settings settings) { + public static boolean isClusterManagerNode(Settings settings) { return hasRole(settings, DiscoveryNodeRole.MASTER_ROLE) || hasRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode(Settings)} */ + @Deprecated + public static boolean isMasterNode(Settings settings) { + return isClusterManagerNode(settings); + } + /** * Due to the way that plugins may not be available when settings are being initialized, * not all roles may be available from a static/initializing context such as a {@link Setting} @@ -462,10 +468,20 @@ public boolean isDataNode() { /** * Can this node become cluster-manager or not. */ - public boolean isMasterNode() { + public boolean isClusterManagerNode() { return roles.contains(DiscoveryNodeRole.MASTER_ROLE) || roles.contains(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } + /** + * Can this node become cluster-manager or not. 
+ * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode()} + */ + @Deprecated + public boolean isMasterNode() { + return isClusterManagerNode(); + } + /** * Returns a boolean that tells whether this an ingest node or not */ diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 0304a51f330c8..8f1d8ef824a56 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -114,7 +114,7 @@ public Iterator iterator() { /** * Returns {@code true} if the local node is the elected cluster-manager node. */ - public boolean isLocalNodeElectedMaster() { + public boolean isLocalNodeElectedClusterManager() { if (localNodeId == null) { // we don't know yet the local node id, return false return false; @@ -122,6 +122,16 @@ public boolean isLocalNodeElectedMaster() { return localNodeId.equals(clusterManagerNodeId); } + /** + * Returns {@code true} if the local node is the elected cluster-manager node. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isLocalNodeElectedClusterManager()} + */ + @Deprecated + public boolean isLocalNodeElectedMaster() { + return isLocalNodeElectedClusterManager(); + } + /** * Get the number of known nodes * @@ -154,10 +164,21 @@ public ImmutableOpenMap getDataNodes() { * * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids */ - public ImmutableOpenMap getMasterNodes() { + public ImmutableOpenMap getClusterManagerNodes() { return this.clusterManagerNodes; } + /** + * Get a {@link Map} of the discovered cluster-manager nodes arranged by their ids + * + * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodes()} + */ + @Deprecated + public ImmutableOpenMap getMasterNodes() { + return getClusterManagerNodes(); + } + /** * @return All the ingest nodes arranged by their ids */ @@ -170,12 +191,23 @@ public ImmutableOpenMap getIngestNodes() { * * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids */ - public ImmutableOpenMap getMasterAndDataNodes() { + public ImmutableOpenMap getClusterManagerAndDataNodes() { ImmutableOpenMap.Builder nodes = ImmutableOpenMap.builder(dataNodes); nodes.putAll(clusterManagerNodes); return nodes.build(); } + /** + * Get a {@link Map} of the discovered cluster-manager and data nodes arranged by their ids + * + * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerAndDataNodes()} + */ + @Deprecated + public ImmutableOpenMap getMasterAndDataNodes() { + return getClusterManagerAndDataNodes(); + } + /** * Get a {@link Map} of the coordinating only nodes (nodes which are neither cluster-manager, nor data, nor ingest nodes) arranged by their ids * @@ -192,13 +224,23 @@ public ImmutableOpenMap getCoordinatingOnlyNodes() { /** * Returns a stream of all nodes, with cluster-manager nodes at the front */ - public Stream mastersFirstStream() { + public Stream clusterManagersFirstStream() { return Stream.concat( StreamSupport.stream(clusterManagerNodes.spliterator(), false).map(cur -> cur.value), - 
StreamSupport.stream(this.spliterator(), false).filter(n -> n.isMasterNode() == false) + StreamSupport.stream(this.spliterator(), false).filter(n -> n.isClusterManagerNode() == false) ); } + /** + * Returns a stream of all nodes, with cluster-manager nodes at the front + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagersFirstStream()} + */ + @Deprecated + public Stream mastersFirstStream() { + return clusterManagersFirstStream(); + } + /** * Get a node by its id * @@ -256,10 +298,21 @@ public boolean nodeExistsWithBWCVersion(DiscoveryNode discoveryNode) { * * @return id of the cluster-manager */ - public String getMasterNodeId() { + public String getClusterManagerNodeId() { return this.clusterManagerNodeId; } + /** + * Get the id of the cluster-manager node + * + * @return id of the cluster-manager + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeId()} + */ + @Deprecated + public String getMasterNodeId() { + return getClusterManagerNodeId(); + } + /** * Get the id of the local node * @@ -282,13 +335,24 @@ public DiscoveryNode getLocalNode() { * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node */ @Nullable - public DiscoveryNode getMasterNode() { + public DiscoveryNode getClusterManagerNode() { if (clusterManagerNodeId != null) { return nodes.get(clusterManagerNodeId); } return null; } + /** + * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNode()} + */ + @Deprecated + @Nullable + public DiscoveryNode getMasterNode() { + return getClusterManagerNode(); + } + /** * Get a node by its address * @@ -396,7 +460,7 @@ public String[] resolveNodes(String... 
nodes) { resolvedNodesIds.add(localNodeId); } } else if (nodeId.equals("_master") || nodeId.equals("_cluster_manager")) { - String clusterManagerNodeId = getMasterNodeId(); + String clusterManagerNodeId = getClusterManagerNodeId(); if (clusterManagerNodeId != null) { resolvedNodesIds.add(clusterManagerNodeId); } @@ -490,8 +554,8 @@ public Delta delta(DiscoveryNodes other) { } return new Delta( - other.getMasterNode(), - getMasterNode(), + other.getClusterManagerNode(), + getClusterManagerNode(), localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added) @@ -507,7 +571,7 @@ public String toString() { if (node == getLocalNode()) { sb.append(", local"); } - if (node == getMasterNode()) { + if (node == getClusterManagerNode()) { sb.append(", cluster-manager"); } sb.append("\n"); @@ -545,23 +609,43 @@ private Delta( } public boolean hasChanges() { - return masterNodeChanged() || !removed.isEmpty() || !added.isEmpty(); + return clusterManagerNodeChanged() || !removed.isEmpty() || !added.isEmpty(); } - public boolean masterNodeChanged() { + public boolean clusterManagerNodeChanged() { return Objects.equals(newClusterManagerNode, previousClusterManagerNode) == false; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeChanged()} */ + @Deprecated + public boolean masterNodeChanged() { + return clusterManagerNodeChanged(); + } + @Nullable public DiscoveryNode previousClusterManagerNode() { return previousClusterManagerNode; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #previousClusterManagerNode()} */ + @Deprecated @Nullable - public DiscoveryNode newMasterNode() { + public DiscoveryNode previousMasterNode() { + return previousClusterManagerNode(); + } + + @Nullable + public DiscoveryNode newClusterManagerNode() { return newClusterManagerNode; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #newClusterManagerNode()} */ + @Deprecated + @Nullable + public DiscoveryNode newMasterNode() { + return newClusterManagerNode(); + } + public boolean removed() { return !removed.isEmpty(); } @@ -580,14 +664,14 @@ public List addedNodes() { public String shortSummary() { final StringBuilder summary = new StringBuilder(); - if (masterNodeChanged()) { + if (clusterManagerNodeChanged()) { summary.append("cluster-manager node changed {previous ["); if (previousClusterManagerNode() != null) { summary.append(previousClusterManagerNode()); } summary.append("], current ["); - if (newMasterNode() != null) { - summary.append(newMasterNode()); + if (newClusterManagerNode() != null) { + summary.append(newClusterManagerNode()); } summary.append("]}"); } @@ -631,7 +715,7 @@ public void writeTo(StreamOutput out) throws IOException { public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { Builder builder = new Builder(); if (in.readBoolean()) { - builder.masterNodeId(in.readString()); + builder.clusterManagerNodeId(in.readString()); } if (localNode != null) { builder.localNodeId(localNode.getId()); @@ -679,7 +763,7 @@ public Builder() { } public Builder(DiscoveryNodes nodes) { - this.clusterManagerNodeId = nodes.getMasterNodeId(); + this.clusterManagerNodeId = nodes.getClusterManagerNodeId(); this.localNodeId = nodes.getLocalNodeId(); this.nodes = ImmutableOpenMap.builder(nodes.getNodes()); } @@ -724,11 +808,17 @@ public Builder remove(DiscoveryNode node) { return this; } - public Builder masterNodeId(String 
clusterManagerNodeId) { + public Builder clusterManagerNodeId(String clusterManagerNodeId) { this.clusterManagerNodeId = clusterManagerNodeId; return this; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeId} */ + @Deprecated + public Builder masterNodeId(String clusterManagerNodeId) { + return clusterManagerNodeId(clusterManagerNodeId); + } + public Builder localNodeId(String localNodeId) { this.localNodeId = localNodeId; return this; @@ -761,7 +851,7 @@ private String validateAdd(DiscoveryNode node) { public DiscoveryNodes build() { ImmutableOpenMap.Builder dataNodesBuilder = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder masterNodesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder clusterManagerNodesBuilder = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder ingestNodesBuilder = ImmutableOpenMap.builder(); Version minNodeVersion = null; Version maxNodeVersion = null; @@ -771,11 +861,11 @@ public DiscoveryNodes build() { if (nodeEntry.value.isDataNode()) { dataNodesBuilder.put(nodeEntry.key, nodeEntry.value); } - if (nodeEntry.value.isMasterNode()) { - masterNodesBuilder.put(nodeEntry.key, nodeEntry.value); + if (nodeEntry.value.isClusterManagerNode()) { + clusterManagerNodesBuilder.put(nodeEntry.key, nodeEntry.value); } final Version version = nodeEntry.value.getVersion(); - if (nodeEntry.value.isDataNode() || nodeEntry.value.isMasterNode()) { + if (nodeEntry.value.isDataNode() || nodeEntry.value.isClusterManagerNode()) { if (minNonClientNodeVersion == null) { minNonClientNodeVersion = version; maxNonClientNodeVersion = version; @@ -794,7 +884,7 @@ public DiscoveryNodes build() { return new DiscoveryNodes( nodes.build(), dataNodesBuilder.build(), - masterNodesBuilder.build(), + clusterManagerNodesBuilder.build(), ingestNodesBuilder.build(), clusterManagerNodeId, localNodeId, @@ -805,9 +895,15 @@ public DiscoveryNodes build() { ); } - public boolean isLocalNodeElectedMaster() { + public boolean isLocalNodeElectedClusterManager() { return clusterManagerNodeId != null && clusterManagerNodeId.equals(localNodeId); } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isLocalNodeElectedClusterManager()} */ + @Deprecated + public boolean isLocalNodeElectedMaster() { + return isLocalNodeElectedClusterManager(); + } } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java index 0738254823964..641fb9abf73e0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java @@ -141,7 +141,7 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { synchronized (mutex) { if (pendingRerouteListeners == currentListeners) { pendingRerouteListeners = null; diff --git a/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java index 321b10be6463d..844b78dccc59b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java @@ -150,7 +150,7 @@ public DelayedAllocationService(ThreadPool threadPool, ClusterService 
clusterSer this.threadPool = threadPool; this.clusterService = clusterService; this.allocationService = allocationService; - if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { + if (DiscoveryNode.isClusterManagerNode(clusterService.getSettings())) { clusterService.addListener(this); } } @@ -174,7 +174,7 @@ protected long currentNanoTime() { @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster()) { + if (event.localNodeClusterManager()) { long currentNanoTime = currentNanoTime(); scheduleIfNeeded(currentNanoTime, event.state()); } @@ -196,7 +196,7 @@ private void removeIfSameTask(DelayedRerouteTask expectedTask) { * Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule. */ private synchronized void scheduleIfNeeded(long currentNanoTime, ClusterState state) { - assertClusterOrMasterStateThread(); + assertClusterOrClusterManagerStateThread(); long nextDelayNanos = UnassignedInfo.findNextDelayedAllocation(currentNanoTime, state); if (nextDelayNanos < 0) { logger.trace("no need to schedule reroute - no delayed unassigned shards"); @@ -236,7 +236,14 @@ private synchronized void scheduleIfNeeded(long currentNanoTime, ClusterState st } // protected so that it can be overridden (and disabled) by unit tests + protected void assertClusterOrClusterManagerStateThread() { + assert ClusterService.assertClusterOrClusterManagerStateThread(); + } + + // protected so that it can be overridden (and disabled) by unit tests + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertClusterOrClusterManagerStateThread()} */ + @Deprecated protected void assertClusterOrMasterStateThread() { - assert ClusterService.assertClusterOrMasterStateThread(); + assertClusterOrClusterManagerStateThread(); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index bc1397989dc96..ce6672f0961bd 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.LocalNodeClusterManagerListener; +import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.TimeoutClusterStateListener; import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; @@ -277,10 +278,19 @@ public void removeTimeoutListener(TimeoutClusterStateListener listener) { /** * Add a listener for on/off local node cluster-manager events */ - public void addLocalNodeMasterListener(LocalNodeClusterManagerListener listener) { + public void addLocalNodeClusterManagerListener(LocalNodeClusterManagerListener listener) { addListener(listener); } + /** + * Add a listener for on/off local node cluster-manager events + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #addLocalNodeClusterManagerListener} + */ + @Deprecated + public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { + addLocalNodeClusterManagerListener(listener); + } + /** * Adds a cluster state listener that is expected to be removed during a short period of time. * If provided, the listener will be notified once a specific time has elapsed. 
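The ClusterApplierService change above, and the ClusterService change that follows, both use the migration pattern applied throughout this patch: the cluster-manager-named method carries the implementation, and the old master-named method survives as a deprecated delegate. A minimal sketch of how calling code might adopt the renamed registration API; the onClusterManager()/offClusterManager() callbacks are assumed from the LocalNodeClusterManagerListener interface, which is not shown in this hunk:

    import org.opensearch.cluster.LocalNodeClusterManagerListener;
    import org.opensearch.cluster.service.ClusterService;

    public class ClusterManagerOnlyWork {

        // Illustrative only: run background work solely while the local node
        // is the elected cluster-manager, via the renamed registration method.
        public static void register(ClusterService clusterService) {
            clusterService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() {
                @Override
                public void onClusterManager() {
                    // local node was elected cluster-manager: start the work
                }

                @Override
                public void offClusterManager() {
                    // local node stepped down: stop the work
                }
            });
            // The deprecated addLocalNodeMasterListener(...) still compiles and
            // runs, but now only delegates to the renamed method above.
        }
    }
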
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index baf453d18b3b3..c58bb4d9a947c 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.LocalNodeClusterManagerListener; +import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; @@ -214,8 +215,17 @@ public void removeListener(ClusterStateListener listener) { /** * Add a listener for on/off local node cluster-manager events */ - public void addLocalNodeMasterListener(LocalNodeClusterManagerListener listener) { - clusterApplierService.addLocalNodeMasterListener(listener); + public void addLocalNodeClusterManagerListener(LocalNodeClusterManagerListener listener) { + clusterApplierService.addLocalNodeClusterManagerListener(listener); + } + + /** + * Add a listener for on/off local node cluster-manager events + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #addLocalNodeClusterManagerListener} + */ + @Deprecated + public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { + addLocalNodeClusterManagerListener(listener); } public MasterService getMasterService() { @@ -240,13 +250,19 @@ public ClusterApplierService getClusterApplierService() { return clusterApplierService; } - public static boolean assertClusterOrMasterStateThread() { + public static boolean assertClusterOrClusterManagerStateThread() { assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) || Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread"; return true; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertClusterOrClusterManagerStateThread} */ + @Deprecated + public static boolean assertClusterOrMasterStateThread() { + return assertClusterOrClusterManagerStateThread(); + } + public ClusterName getClusterName() { return clusterName; } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 2d52887858372..cc58c56e00dd5 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -256,7 +256,7 @@ private void runTasks(TaskInputs taskInputs) { logger.debug("executing cluster state update for [{}]", summary); final ClusterState previousClusterState = state(); - if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyWhenClusterManager()) { + if (!previousClusterState.nodes().isLocalNodeElectedClusterManager() && taskInputs.runOnlyWhenClusterManager()) { logger.debug("failing [{}]: local node is no longer cluster-manager", summary); taskInputs.onNoLongerClusterManager(); return; @@ -616,9 +616,9 @@ public void onFailure(String source, Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { try (ThreadContext.StoredContext 
ignore = context.get()) { - listener.onNoLongerMaster(source); + listener.onNoLongerClusterManager(source); } catch (Exception e) { logger.error( () -> new ParameterizedMessage( @@ -745,7 +745,7 @@ private static class AckCountDownListener implements Discovery.AckListener { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; this.threadPool = threadPool; - this.clusterManagerNode = nodes.getMasterNode(); + this.clusterManagerNode = nodes.getClusterManagerNode(); int countDown = 0; for (DiscoveryNode node : nodes) { // we always wait for at least the cluster-manager node @@ -823,8 +823,8 @@ private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterSt List inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); clusterTasksResult = taskInputs.executor.execute(previousClusterState, inputs); if (previousClusterState != clusterTasksResult.resultingState - && previousClusterState.nodes().isLocalNodeElectedMaster() - && (clusterTasksResult.resultingState.nodes().isLocalNodeElectedMaster() == false)) { + && previousClusterState.nodes().isLocalNodeElectedClusterManager() + && (clusterTasksResult.resultingState.nodes().isLocalNodeElectedClusterManager() == false)) { throw new AssertionError("update task submitted to ClusterManagerService cannot remove cluster-manager"); } } catch (Exception e) { @@ -888,11 +888,11 @@ private class TaskInputs { } boolean runOnlyWhenClusterManager() { - return executor.runOnlyOnMaster(); + return executor.runOnlyOnClusterManager(); } void onNoLongerClusterManager() { - updateTasks.forEach(task -> task.listener.onNoLongerMaster(task.source())); + updateTasks.forEach(task -> task.listener.onNoLongerClusterManager(task.source())); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index 3be1c4b080b5f..76f5721c85efd 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -123,7 +123,7 @@ public boolean areAllConsistent() { "no published hash for the consistent secure setting [{}] but it exists on the local node", concreteSecureSetting.getKey() ); - if (state.nodes().isLocalNodeElectedMaster()) { + if (state.nodes().isLocalNodeElectedClusterManager()) { throw new IllegalStateException( "Master node cannot validate consistent setting. No published hash for [" + concreteSecureSetting.getKey() @@ -162,7 +162,7 @@ public boolean areAllConsistent() { concreteSecureSetting.getKey(), computedSaltedHash ); - if (state.nodes().isLocalNodeElectedMaster()) { + if (state.nodes().isLocalNodeElectedClusterManager()) { throw new IllegalStateException( "Master node cannot validate consistent setting. 
The published hash [" + publishedHash diff --git a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java index fcc890b1c21c9..8c0073cbbf245 100644 --- a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java @@ -141,7 +141,7 @@ protected void innerOnResponse(DiscoveryNode remoteNode) { if (remoteNode.equals(transportService.getLocalNode())) { listener.onFailure(new ConnectTransportException(remoteNode, "local node found")); - } else if (remoteNode.isMasterNode() == false) { + } else if (remoteNode.isClusterManagerNode() == false) { listener.onFailure( new ConnectTransportException(remoteNode, "non-cluster-manager-eligible node found") ); diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index 03cdc4ba68cc9..a601a6fbe4d82 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -174,7 +174,7 @@ PeersResponse handlePeersRequest(PeersRequest peersRequest) { final List knownPeers; if (active) { assert leader.isPresent() == false : leader; - if (peersRequest.getSourceNode().isMasterNode()) { + if (peersRequest.getSourceNode().isClusterManagerNode()) { startProbe(peersRequest.getSourceNode().getAddress()); } peersRequest.getKnownPeers().stream().map(DiscoveryNode::getAddress).forEach(this::startProbe); @@ -291,7 +291,7 @@ private boolean handleWakeUp() { } logger.trace("probing cluster-manager nodes from cluster state: {}", lastAcceptedNodes); - for (ObjectCursor discoveryNodeObjectCursor : lastAcceptedNodes.getMasterNodes().values()) { + for (ObjectCursor discoveryNodeObjectCursor : lastAcceptedNodes.getClusterManagerNodes().values()) { startProbe(discoveryNodeObjectCursor.value.getAddress()); } @@ -396,7 +396,7 @@ void establishConnection() { transportAddressConnector.connectToRemoteMasterNode(transportAddress, new ActionListener() { @Override public void onResponse(DiscoveryNode remoteNode) { - assert remoteNode.isMasterNode() : remoteNode + " is not cluster-manager-eligible"; + assert remoteNode.isClusterManagerNode() : remoteNode + " is not cluster-manager-eligible"; assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node"; synchronized (mutex) { if (active == false) { @@ -457,11 +457,11 @@ public void handleResponse(PeersResponse response) { peersRequestInFlight = false; - response.getMasterNode().map(DiscoveryNode::getAddress).ifPresent(PeerFinder.this::startProbe); + response.getClusterManagerNode().map(DiscoveryNode::getAddress).ifPresent(PeerFinder.this::startProbe); response.getKnownPeers().stream().map(DiscoveryNode::getAddress).forEach(PeerFinder.this::startProbe); } - if (response.getMasterNode().equals(Optional.of(discoveryNode))) { + if (response.getClusterManagerNode().equals(Optional.of(discoveryNode))) { // Must not hold lock here to avoid deadlock assert holdsLock() == false : "PeerFinder mutex is held in error"; onActiveClusterManagerFound(discoveryNode, response.getTerm()); diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 3da2cce8a2620..1eb64de2126d6 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ 
b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -354,12 +354,12 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce applySegmentInfosTrace(settings); assertCanWrite(); - if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings) || DiscoveryNode.isDataNode(settings)) { ensureAtomicMoveSupported(nodePaths); } if (DiscoveryNode.isDataNode(settings) == false) { - if (DiscoveryNode.isMasterNode(settings) == false) { + if (DiscoveryNode.isClusterManagerNode(settings) == false) { ensureNoIndexMetadata(nodePaths); } diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index 2e9ea67a93944..58d6f33292273 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -96,7 +96,7 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc throws IOException { assert DiscoveryNode.isDataNode(env.settings()) == false; - if (DiscoveryNode.isMasterNode(env.settings()) == false) { + if (DiscoveryNode.isClusterManagerNode(env.settings()) == false) { processNoClusterManagerNoDataNode(terminal, dataPaths, env); } else { processClusterManagerNoDataNode(terminal, dataPaths, env); diff --git a/server/src/main/java/org/opensearch/gateway/Gateway.java b/server/src/main/java/org/opensearch/gateway/Gateway.java index f46f4317c83e1..413c08569c64a 100644 --- a/server/src/main/java/org/opensearch/gateway/Gateway.java +++ b/server/src/main/java/org/opensearch/gateway/Gateway.java @@ -70,7 +70,7 @@ public Gateway( } public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException { - final String[] nodesIds = clusterService.state().nodes().getMasterNodes().keys().toArray(String.class); + final String[] nodesIds = clusterService.state().nodes().getClusterManagerNodes().keys().toArray(String.class); logger.trace("performing state recovery from {}", Arrays.toString(nodesIds)); final TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds, null).actionGet(); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 610219ab1f054..f70fdea153893 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -127,7 +127,7 @@ public void start( ) { assert persistedState.get() == null : "should only start once, but already have " + persistedState.get(); - if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings) || DiscoveryNode.isDataNode(settings)) { try { final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState(); @@ -158,7 +158,7 @@ public void start( .build() ); - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { persistedState = new AsyncLucenePersistedState( diff --git a/server/src/main/java/org/opensearch/gateway/GatewayService.java b/server/src/main/java/org/opensearch/gateway/GatewayService.java index 06e92b7f0d7cf..d04f3ee15d888 100644 --- 
a/server/src/main/java/org/opensearch/gateway/GatewayService.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayService.java @@ -188,7 +188,7 @@ public GatewayService( @Override protected void doStart() { - if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { + if (DiscoveryNode.isClusterManagerNode(clusterService.getSettings())) { // use post applied so that the state will be visible to the background recovery thread we spawn in performStateRecovery clusterService.addListener(this); } @@ -210,7 +210,7 @@ public void clusterChanged(final ClusterChangedEvent event) { final ClusterState state = event.state(); - if (state.nodes().isLocalNodeElectedMaster() == false) { + if (state.nodes().isLocalNodeElectedClusterManager() == false) { // not our job to recover return; } @@ -220,12 +220,12 @@ public void clusterChanged(final ClusterChangedEvent event) { } final DiscoveryNodes nodes = state.nodes(); - if (state.nodes().getMasterNodeId() == null) { + if (state.nodes().getClusterManagerNodeId() == null) { logger.debug("not recovering from gateway, no cluster-manager elected yet"); - } else if (recoverAfterNodes != -1 && (nodes.getMasterAndDataNodes().size()) < recoverAfterNodes) { + } else if (recoverAfterNodes != -1 && (nodes.getClusterManagerAndDataNodes().size()) < recoverAfterNodes) { logger.debug( "not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]", - nodes.getMasterAndDataNodes().size(), + nodes.getClusterManagerAndDataNodes().size(), recoverAfterNodes ); } else if (recoverAfterDataNodes != -1 && nodes.getDataNodes().size() < recoverAfterDataNodes) { @@ -234,10 +234,10 @@ public void clusterChanged(final ClusterChangedEvent event) { nodes.getDataNodes().size(), recoverAfterDataNodes ); - } else if (recoverAfterClusterManagerNodes != -1 && nodes.getMasterNodes().size() < recoverAfterClusterManagerNodes) { + } else if (recoverAfterClusterManagerNodes != -1 && nodes.getClusterManagerNodes().size() < recoverAfterClusterManagerNodes) { logger.debug( "not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]", - nodes.getMasterNodes().size(), + nodes.getClusterManagerNodes().size(), recoverAfterClusterManagerNodes ); } else { @@ -251,19 +251,24 @@ public void clusterChanged(final ClusterChangedEvent event) { // one of the expected is set, see if all of them meet the need, and ignore the timeout in this case enforceRecoverAfterTime = false; reason = ""; - if (expectedNodes != -1 && (nodes.getMasterAndDataNodes().size() < expectedNodes)) { // does not meet the expected... + if (expectedNodes != -1 && (nodes.getClusterManagerAndDataNodes().size() < expectedNodes)) { // does not meet the + // expected... enforceRecoverAfterTime = true; - reason = "expecting [" + expectedNodes + "] nodes, but only have [" + nodes.getMasterAndDataNodes().size() + "]"; + reason = "expecting [" + + expectedNodes + + "] nodes, but only have [" + + nodes.getClusterManagerAndDataNodes().size() + + "]"; } else if (expectedDataNodes != -1 && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected... enforceRecoverAfterTime = true; reason = "expecting [" + expectedDataNodes + "] data nodes, but only have [" + nodes.getDataNodes().size() + "]"; - } else if (expectedClusterManagerNodes != -1 && (nodes.getMasterNodes().size() < expectedClusterManagerNodes)) { + } else if (expectedClusterManagerNodes != -1 && (nodes.getClusterManagerNodes().size() < expectedClusterManagerNodes)) { // does not meet the expected... 
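// (Illustrative aside, not part of this patch.) The GatewayService hunk above
// swaps every node-count accessor to its ClusterManager-named equivalent. A
// minimal sketch of the renamed recovery gate, assuming hypothetical local
// threshold values:
//
//     // true when state recovery should wait for more nodes to join
//     static boolean shouldDelayRecovery(DiscoveryNodes nodes, int recoverAfterNodes, int recoverAfterDataNodes) {
//         // getClusterManagerAndDataNodes() replaces getMasterAndDataNodes()
//         if (recoverAfterNodes != -1 && nodes.getClusterManagerAndDataNodes().size() < recoverAfterNodes) {
//             return true;
//         }
//         return recoverAfterDataNodes != -1 && nodes.getDataNodes().size() < recoverAfterDataNodes;
//     }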
enforceRecoverAfterTime = true; reason = "expecting [" + expectedClusterManagerNodes + "] cluster-manager nodes, but only have [" - + nodes.getMasterNodes().size() + + nodes.getClusterManagerNodes().size() + "]"; } } @@ -341,7 +346,7 @@ public void clusterStateProcessed(final String source, final ClusterState oldSta } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.debug("stepped down as cluster-manager before recovering state [{}]", source); resetRecoveredFlags(); } diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index 98b3c104aadbf..735140ca5dc24 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -108,7 +108,7 @@ public LocalAllocateDangledIndices( public void allocateDangled(Collection indices, ActionListener listener) { ClusterState clusterState = clusterService.state(); - DiscoveryNode clusterManagerNode = clusterState.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = clusterState.nodes().getClusterManagerNode(); if (clusterManagerNode == null) { listener.onFailure(new ClusterManagerNotDiscoveredException("no cluster-manager to send allocate dangled request")); return; @@ -165,7 +165,7 @@ public ClusterState execute(ClusterState currentState) { indexMetadata.getIndex(), request.fromNode, indexMetadata.getCreationVersion(), - currentState.getNodes().getMasterNode().getVersion() + currentState.getNodes().getClusterManagerNode().getVersion() ); continue; } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index d63a178b4213a..55d95381923b3 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -135,7 +135,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * to eagerly. As consequence, some of the methods in this class are not allowed to be called while a handoff is in progress, * in particular {@link #markAllocationIdAsInSync}. * - * A notable exception to this is the method {@link #updateFromMaster}, which is still allowed to be called during a relocation handoff. + * A notable exception to this is the method {@link #updateFromClusterManager}, which is still allowed to be called during a relocation handoff. * The reason for this is that the handoff might fail and can be aborted (using {@link #abortRelocationHandoff}), in which case * it is important that the global checkpoint tracker does not miss any state updates that might happened during the handoff attempt. 
* This means, however, that the global checkpoint can still advance after the primary relocation handoff has been initiated, but only @@ -1176,7 +1176,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { * @param inSyncAllocationIds the allocation IDs of the currently in-sync shard copies * @param routingTable the shard routing table */ - public synchronized void updateFromMaster( + public synchronized void updateFromClusterManager( final long applyingClusterStateVersion, final Set inSyncAllocationIds, final IndexShardRoutingTable routingTable @@ -1239,6 +1239,22 @@ public synchronized void updateFromMaster( assert invariant(); } + /** + * Notifies the tracker of the current allocation IDs in the cluster state. + * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the cluster-manager + * @param inSyncAllocationIds the allocation IDs of the currently in-sync shard copies + * @param routingTable the shard routing table + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #updateFromClusterManager(long, Set, IndexShardRoutingTable)} + */ + @Deprecated + public synchronized void updateFromMaster( + final long applyingClusterStateVersion, + final Set inSyncAllocationIds, + final IndexShardRoutingTable routingTable + ) { + updateFromClusterManager(applyingClusterStateVersion, inSyncAllocationIds, routingTable); + } + /** * Called when the recovery process for a shard has opened the engine on the target shard. Ensures that the right data structures * have been set up locally to track local checkpoint information for the shard and that the shard is added to the replication group. @@ -1558,7 +1574,7 @@ private Runnable getClusterManagerUpdateOperationFromCurrentState() { } }); final IndexShardRoutingTable lastAppliedRoutingTable = routingTable; - return () -> updateFromMaster(lastAppliedClusterStateVersion, inSyncAllocationIds, lastAppliedRoutingTable); + return () -> updateFromClusterManager(lastAppliedClusterStateVersion, inSyncAllocationIds, lastAppliedRoutingTable); } /** diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 59aeef6209cf8..fbe81b9320de5 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -535,7 +535,7 @@ public void updateShardState( } if (newRouting.primary()) { - replicationTracker.updateFromMaster(applyingClusterStateVersion, inSyncAllocationIds, routingTable); + replicationTracker.updateFromClusterManager(applyingClusterStateVersion, inSyncAllocationIds, routingTable); } if (state == IndexShardState.POST_RECOVERY && newRouting.active()) { diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 7233b6893b03e..33d3bc2ba07b0 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -215,14 +215,14 @@ public IndicesClusterStateService( @Override protected void doStart() { // Doesn't make sense to manage shards on non-master and non-data nodes - if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isClusterManagerNode(settings)) { 
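// (Illustrative aside, not part of this patch.) The ReplicationTracker hunk
// above shows the backwards-compatibility idiom used throughout this series:
// the method is renamed, and the old name survives as a deprecated one-line
// delegate. The same idiom with hypothetical method names:
//
//     /** @deprecated replaced by {@link #refreshFromClusterManager()} */
//     @Deprecated
//     public void refreshFromMaster() {
//         refreshFromClusterManager(); // old entry point forwards to the new one
//     }
//
//     public void refreshFromClusterManager() {
//         // the actual logic lives only under the new name
//     }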
clusterService.addHighPriorityApplier(this); } } @Override protected void doStop() { - if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isClusterManagerNode(settings)) { clusterService.removeApplier(this); } } @@ -280,7 +280,7 @@ private void updateFailedShardsCache(final ClusterState state) { return; } - DiscoveryNode clusterManagerNode = state.nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = state.nodes().getClusterManagerNode(); // remove items from cache which are not in our routing table anymore and // resend failures that have not executed on cluster-manager yet @@ -517,7 +517,7 @@ private void createIndices(final ClusterState state) { indexService = indicesService.createIndex(indexMetadata, buildInIndexListener, true); if (indexService.updateMapping(null, indexMetadata) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh( - state.nodes().getMasterNode(), + state.nodes().getClusterManagerNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest( indexMetadata.getIndex().getName(), indexMetadata.getIndexUUID(), @@ -564,7 +564,7 @@ private void updateIndices(ClusterChangedEvent event) { reason = "mapping update failed"; if (indexService.updateMapping(currentIndexMetadata, newIndexMetadata) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh( - state.nodes().getMasterNode(), + state.nodes().getClusterManagerNode(), new NodeMappingRefreshAction.NodeMappingRefreshRequest( newIndexMetadata.getIndex().getName(), newIndexMetadata.getIndexUUID(), @@ -690,15 +690,15 @@ private void updateShard( "{} cluster-manager marked shard as initializing, but shard has state [{}], resending shard started to {}", shardRouting.shardId(), state, - nodes.getMasterNode() + nodes.getClusterManagerNode() ); } - if (nodes.getMasterNode() != null) { + if (nodes.getClusterManagerNode() != null) { shardStateAction.shardStarted( shardRouting, primaryTerm, "master " - + nodes.getMasterNode() + + nodes.getClusterManagerNode() + " marked shard as initializing, but shard state is [" + state + "], mark shard as started", @@ -864,7 +864,7 @@ public interface Shard { * - Updates and persists the new routing value. * - Updates the primary term if this shard is a primary. * - Updates the allocation ids that are tracked by the shard if it is a primary. - * See {@link ReplicationTracker#updateFromMaster(long, Set, IndexShardRoutingTable)} for details. + * See {@link ReplicationTracker#updateFromClusterManager(long, Set, IndexShardRoutingTable)} for details. 
* * @param shardRouting the new routing entry * @param primaryTerm the new primary term diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 4e0fb39db80d2..eb2604af0147b 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -737,7 +737,7 @@ protected Node( systemIndices, scriptService ); - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { clusterService.addListener(new SystemIndexMetadataUpgradeService(systemIndices, clusterService)); } new TemplateUpgradeService(client, clusterService, threadPool, indexTemplateMetadataUpgraders); @@ -1144,7 +1144,7 @@ public Node start() throws NodeValidationException { ClusterState clusterState = clusterService.state(); ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); - if (clusterState.nodes().getMasterNodeId() == null) { + if (clusterState.nodes().getClusterManagerNodeId() == null) { logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout); final CountDownLatch latch = new CountDownLatch(1); observer.waitForNextChange(new ClusterStateObserver.Listener() { @@ -1163,7 +1163,7 @@ public void onTimeout(TimeValue timeout) { logger.warn("timed out while waiting for initial discovery state - timeout: {}", initialStateTimeout); latch.countDown(); } - }, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout); + }, state -> state.nodes().getClusterManagerNodeId() != null, initialStateTimeout); try { latch.await(); @@ -1486,7 +1486,7 @@ protected ClusterInfoService newClusterInfoService( NodeClient client ) { final InternalClusterInfoService service = new InternalClusterInfoService(settings, clusterService, threadPool, client); - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { // listen for state changes (this node starts/stops being the elected cluster manager, or new nodes are added) clusterService.addListener(service); } diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index b9cb03854829d..9dc7f7d7380cc 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -93,7 +93,7 @@ public PersistentTasksClusterService( this.decider = new EnableAssignmentDecider(settings, clusterService.getClusterSettings()); this.threadPool = threadPool; this.periodicRechecker = new PeriodicRechecker(CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING.get(settings)); - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { clusterService.addListener(this); } clusterService.getClusterSettings() @@ -356,7 +356,7 @@ private Assignment createAssignment( @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster()) { + if (event.localNodeClusterManager()) { if (shouldReassignPersistentTasks(event)) { // We want to avoid a periodic check duplicating this work periodicRechecker.cancel(); @@ -409,7 +409,7 @@ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { return false; } - boolean masterChanged = event.previousState().nodes().isLocalNodeElectedMaster() == false; + boolean masterChanged = 
event.previousState().nodes().isLocalNodeElectedClusterManager() == false; if (persistentTasksChanged(event) || event.nodesChanged() @@ -518,7 +518,7 @@ protected boolean mustReschedule() { @Override public void runInternal() { - if (clusterService.localNode().isMasterNode()) { + if (clusterService.localNode().isClusterManagerNode()) { final ClusterState state = clusterService.state(); logger.trace("periodic persistent task assignment check running for cluster state {}", state.getVersion()); if (isAnyTaskUnassigned(state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE))) { diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 5b4e30f8495e8..c70c10495b7b5 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -126,7 +126,7 @@ public RepositoriesService( this.threadPool = threadPool; // Doesn't make sense to maintain repositories on non-master and non-data nodes // Nothing happens there anyway - if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isClusterManagerNode(settings)) { if (isDedicatedVotingOnlyNode(DiscoveryNode.getRolesFromSettings(settings)) == false) { clusterService.addHighPriorityApplier(this); } @@ -238,7 +238,7 @@ public void onFailure(String source, Exception e) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { // repository is created on both cluster-manager and data nodes - return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); + return discoveryNode.isClusterManagerNode() || discoveryNode.isDataNode(); } } ); @@ -293,7 +293,7 @@ public ClusterState execute(ClusterState currentState) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { // repository was created on both cluster-manager and data nodes - return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); + return discoveryNode.isClusterManagerNode() || discoveryNode.isDataNode(); } } ); diff --git a/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java index 98e8521ecff57..3c6d948aee453 100644 --- a/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java @@ -96,7 +96,7 @@ public void verify(String repository, String verificationToken, final ActionList final DiscoveryNodes discoNodes = clusterService.state().nodes(); final DiscoveryNode localNode = discoNodes.getLocalNode(); - final ObjectContainer masterAndDataNodes = discoNodes.getMasterAndDataNodes().values(); + final ObjectContainer masterAndDataNodes = discoNodes.getClusterManagerAndDataNodes().values(); final List nodes = new ArrayList<>(); for (ObjectCursor cursor : masterAndDataNodes) { DiscoveryNode node = cursor.value; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestClusterManagerAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestClusterManagerAction.java index e6192c6f1e7c9..8f7f9e5bd20a7 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestClusterManagerAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestClusterManagerAction.java @@ -113,7 +113,7 @@ private Table buildTable(RestRequest 
request, ClusterStateResponse state) { DiscoveryNodes nodes = state.getState().nodes(); table.startRow(); - DiscoveryNode clusterManager = nodes.get(nodes.getMasterNodeId()); + DiscoveryNode clusterManager = nodes.get(nodes.getClusterManagerNodeId()); if (clusterManager == null) { table.addCell("-"); table.addCell("-"); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestHealthAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestHealthAction.java index 717df832d5d73..b4d336f4c10c0 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestHealthAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestHealthAction.java @@ -119,7 +119,7 @@ private Table buildTable(final ClusterHealthResponse health, final RestRequest r t.addCell(health.getStatus().name().toLowerCase(Locale.ROOT)); t.addCell(health.getNumberOfNodes()); t.addCell(health.getNumberOfDataNodes()); - t.addCell(health.hasDiscoveredMaster()); + t.addCell(health.hasDiscoveredClusterManager()); t.addCell(health.getActiveShards()); t.addCell(health.getActivePrimaryShards()); t.addCell(health.getRelocatingShards()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index e0cc1e6af6467..8d3081bec48e9 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -343,7 +343,7 @@ Table buildTable( ) { DiscoveryNodes nodes = state.getState().nodes(); - String clusterManagerId = nodes.getMasterNodeId(); + String clusterManagerId = nodes.getClusterManagerNodeId(); Table table = getTableWithHeader(req); for (DiscoveryNode node : nodes) { diff --git a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java index 10328d3e2ce9f..518e4a558b2df 100644 --- a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java +++ b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java @@ -130,7 +130,7 @@ public InternalSnapshotsInfoService( this.maxConcurrentFetches = INTERNAL_SNAPSHOT_INFO_MAX_CONCURRENT_FETCHES_SETTING.get(settings); final ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(INTERNAL_SNAPSHOT_INFO_MAX_CONCURRENT_FETCHES_SETTING, this::setMaxConcurrentFetches); - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { clusterService.addListener(this); } } @@ -155,7 +155,7 @@ public SnapshotShardSizeInfo snapshotShardSizes() { @Override public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster()) { + if (event.localNodeClusterManager()) { final Set onGoingSnapshotRecoveries = listOfSnapshotShards(event.state()); int unknownShards = 0; @@ -180,7 +180,7 @@ public void clusterChanged(ClusterChangedEvent event) { fetchNextSnapshotShard(); } - } else if (event.previousState().nodes().isLocalNodeElectedMaster()) { + } else if (event.previousState().nodes().isLocalNodeElectedClusterManager()) { // TODO Maybe just clear out non-ongoing snapshot recoveries is the node is cluster-manager eligible, so that we don't // have to repopulate the data over and over in an unstable cluster-manager situation? 
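// (Illustrative aside, not part of this patch.) InternalSnapshotsInfoService
// above follows the listener idiom that recurs in most of these hunks: do the
// work while elected, clean up when the previous state says this node just
// stepped down. A minimal sketch, with onElected/onSteppedDown as hypothetical
// helpers:
//
//     @Override
//     public void clusterChanged(ClusterChangedEvent event) {
//         if (event.localNodeClusterManager()) {                 // was event.localNodeMaster()
//             onElected(event.state());
//         } else if (event.previousState().nodes().isLocalNodeElectedClusterManager()) {
//             onSteppedDown();                                   // this node just lost the role
//         }
//     }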
synchronized (mutex) { diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 918191879f1d0..a14de23d81d09 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -192,7 +192,7 @@ public RestoreService( this.allocationService = allocationService; this.createIndexService = createIndexService; this.metadataIndexUpgradeService = metadataIndexUpgradeService; - if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { + if (DiscoveryNode.isClusterManagerNode(clusterService.getSettings())) { clusterService.addStateApplier(this); } this.clusterSettings = clusterService.getClusterSettings(); @@ -1060,7 +1060,7 @@ public void onFailure(final String source, final Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.debug("no longer cluster-manager while processing restore state update [{}]", source); } @@ -1210,7 +1210,7 @@ public static Set restoringIndices(final ClusterState currentState, final @Override public void applyClusterState(ClusterChangedEvent event) { try { - if (event.localNodeMaster()) { + if (event.localNodeClusterManager()) { cleanupRestoreState(event); } } catch (Exception t) { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 695426dea6864..22a85558e3b1d 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -150,8 +150,8 @@ public void clusterChanged(ClusterChangedEvent event) { } } - String previousClusterManagerNodeId = event.previousState().nodes().getMasterNodeId(); - String currentMasterNodeId = event.state().nodes().getMasterNodeId(); + String previousClusterManagerNodeId = event.previousState().nodes().getClusterManagerNodeId(); + String currentMasterNodeId = event.state().nodes().getClusterManagerNodeId(); if (currentMasterNodeId != null && currentMasterNodeId.equals(previousClusterManagerNodeId) == false) { syncShardStatsOnNewMaster(event); } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index c87bf33727524..024fb6ec53089 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -235,7 +235,7 @@ public SnapshotsService( actionFilters, indexNameExpressionResolver ); - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { // addLowPriorityApplier to make sure that Repository will be created before snapshot clusterService.addLowPriorityApplier(this); maxConcurrentOperations = MAX_CONCURRENT_SNAPSHOT_OPERATIONS_SETTING.get(settings); @@ -1120,7 +1120,7 @@ public void onFailure(String source, Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { // We are not longer a cluster-manager - we shouldn't try to do any cleanup // The new cluster-manager will take care of it logger.warn( @@ -1181,7 +1181,7 @@ public void onFailure(@Nullable Exception e) { userCreateSnapshotListener.onFailure(ExceptionsHelper.useOrSuppress(e, this.e)); } - public void 
onNoLongerMaster() { + public void onNoLongerClusterManager() { userCreateSnapshotListener.onFailure(e); } } @@ -1305,11 +1305,11 @@ public static List currentSnapshots( @Override public void applyClusterState(ClusterChangedEvent event) { try { - if (event.localNodeMaster()) { + if (event.localNodeClusterManager()) { // We don't remove old cluster-manager when cluster-manager flips anymore. So, we need to check for change in // cluster-manager SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); - final boolean newClusterManager = event.previousState().nodes().isLocalNodeElectedMaster() == false; + final boolean newClusterManager = event.previousState().nodes().isLocalNodeElectedClusterManager() == false; processExternalChanges( newClusterManager || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) @@ -2091,12 +2091,12 @@ public void onFailure(String source, Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { failure.addSuppressed(new SnapshotException(snapshot, "no longer cluster-manager")); failSnapshotCompletionListeners(snapshot, failure); failAllListenersOnMasterFailOver(new NotClusterManagerException(source)); if (listener != null) { - listener.onNoLongerMaster(); + listener.onNoLongerClusterManager(); } } diff --git a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java index 708d1a3a0e4c1..b9764c0c53f4a 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionProfile.java @@ -103,7 +103,10 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); // if we are not cluster-manager eligible we don't need a dedicated channel to publish the state - builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); + builder.addConnections( + DiscoveryNode.isClusterManagerNode(settings) ? connectionsPerNodeState : 0, + TransportRequestOptions.Type.STATE + ); // if we are not a data-node we don't need any dedicated channels for recovery builder.addConnections(DiscoveryNode.isDataNode(settings) ? 
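// (Illustrative aside, not part of this patch.) The ConnectionProfile hunk
// above conditions channel counts on node roles via the renamed statics. The
// same check, as a tiny self-contained sketch with a hypothetical count:
//
//     // cluster-manager-eligible nodes get dedicated STATE channels, others get zero
//     int stateChannels(Settings settings, int connectionsPerNodeState) {
//         return DiscoveryNode.isClusterManagerNode(settings) ? connectionsPerNodeState : 0;
//     }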
connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); diff --git a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java index 3e95c937a296f..47fd5fc2c9489 100644 --- a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java @@ -141,7 +141,7 @@ public class SniffConnectionStrategy extends RemoteConnectionStrategy { static final int CHANNELS_PER_CONNECTION = 6; private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) - && (node.isMasterNode() == false || node.isDataNode() || node.isIngestNode()); + && (node.isClusterManagerNode() == false || node.isDataNode() || node.isIngestNode()); private final List configuredSeedNodes; private final List> seedNodes; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index dfd6d059cc3a8..0c728c8314517 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -175,7 +175,7 @@ public void setupForTest() { .add(otherNode2) .add(otherDataNode) .localNodeId(localNode.getId()) - .masterNodeId(localNode.getId()) + .clusterManagerNodeId(localNode.getId()) ) .metadata( Metadata.builder() diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java index 604eee4763c89..b3620bf757256 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java @@ -126,7 +126,11 @@ public void setupForTest() { transportService.acceptIncomingRequests(); final ClusterState.Builder builder = builder(new ClusterName("cluster")).nodes( - new Builder().add(localNode).add(otherNode1).add(otherNode2).localNodeId(localNode.getId()).masterNodeId(localNode.getId()) + new Builder().add(localNode) + .add(otherNode1) + .add(otherNode2) + .localNodeId(localNode.getId()) + .clusterManagerNodeId(localNode.getId()) ); builder.metadata( Metadata.builder() diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index b33f5c7bd5bc7..c71b5faea1bcf 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -114,7 +114,7 @@ public void testClusterHealthVerifyClusterManagerNodeDiscovery() throws IOExcept DiscoveryNode localNode = new DiscoveryNode("node", OpenSearchTestCase.buildNewFakeTransportAddress(), Version.CURRENT); // set the node 
information to verify cluster_manager_node discovery in ClusterHealthResponse ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) + .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId())) .build(); int pendingTasks = randomIntBetween(0, 200); int inFlight = randomIntBetween(0, 200); @@ -130,7 +130,7 @@ public void testClusterHealthVerifyClusterManagerNodeDiscovery() throws IOExcept pendingTaskInQueueTime ); clusterHealth = maybeSerialize(clusterHealth); - assertThat(clusterHealth.getClusterStateHealth().hasDiscoveredMaster(), Matchers.equalTo(true)); + assertThat(clusterHealth.getClusterStateHealth().hasDiscoveredClusterManager(), Matchers.equalTo(true)); assertClusterHealth(clusterHealth); } @@ -144,7 +144,7 @@ private void assertClusterHealth(ClusterHealthResponse clusterHealth) { assertThat(clusterHealth.getUnassignedShards(), Matchers.equalTo(clusterStateHealth.getUnassignedShards())); assertThat(clusterHealth.getNumberOfNodes(), Matchers.equalTo(clusterStateHealth.getNumberOfNodes())); assertThat(clusterHealth.getNumberOfDataNodes(), Matchers.equalTo(clusterStateHealth.getNumberOfDataNodes())); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(clusterStateHealth.hasDiscoveredMaster())); + assertThat(clusterHealth.hasDiscoveredClusterManager(), Matchers.equalTo(clusterStateHealth.hasDiscoveredClusterManager())); } ClusterHealthResponse maybeSerialize(ClusterHealthResponse clusterHealth) throws IOException { @@ -175,7 +175,7 @@ public void testParseFromXContentWithDiscoveredClusterManagerField() throws IOEx assertNotNull(clusterHealth); assertThat(clusterHealth.getClusterName(), Matchers.equalTo("535799904437:7-1-3-node")); assertThat(clusterHealth.getNumberOfNodes(), Matchers.equalTo(6)); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + assertThat(clusterHealth.hasDiscoveredClusterManager(), Matchers.equalTo(true)); } } @@ -196,7 +196,7 @@ public void testParseFromXContentWithoutDiscoveredClusterManagerField() throws I assertNotNull(clusterHealth); assertThat(clusterHealth.getClusterName(), Matchers.equalTo("535799904437:7-1-3-node")); assertThat(clusterHealth.getNumberOfNodes(), Matchers.equalTo(6)); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(false)); + assertThat(clusterHealth.hasDiscoveredClusterManager(), Matchers.equalTo(false)); } } @@ -218,7 +218,7 @@ public void testParseFromXContentWithDeprecatedDiscoveredMasterField() throws IO ) ) { ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + assertThat(clusterHealth.hasDiscoveredClusterManager(), Matchers.equalTo(true)); } try ( @@ -234,7 +234,7 @@ public void testParseFromXContentWithDeprecatedDiscoveredMasterField() throws IO ) ) { ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); - assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + assertThat(clusterHealth.hasDiscoveredClusterManager(), Matchers.equalTo(true)); } } @@ -384,7 +384,7 @@ protected ClusterHealthResponse mutateInstance(ClusterHealthResponse instance) { state.getUnassignedShards(), state.getNumberOfNodes(), state.getNumberOfDataNodes(), - state.hasDiscoveredMaster(), + state.hasDiscoveredClusterManager(), 
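// (Illustrative aside, not part of this patch.) The test hunks in this region
// all rebuild their cluster state the same way; the renamed builder call is
// the only change. A self-contained sketch of the pattern, with hypothetical
// node ids:
//
//     DiscoveryNode local = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT);
//     DiscoveryNodes nodes = DiscoveryNodes.builder()
//         .add(local)
//         .localNodeId(local.getId())
//         .clusterManagerNodeId(local.getId())   // was .masterNodeId(local.getId())
//         .build();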
state.getActiveShardsPercent(), state.getStatus(), state.getIndices() diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 7a7bc18876932..94f9be6c3ee85 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -61,7 +61,7 @@ public class ClusterRerouteResponseTests extends OpenSearchTestCase { public void testToXContent() throws IOException { DiscoveryNode node0 = new DiscoveryNode("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000), Version.CURRENT); - DiscoveryNodes nodes = new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build(); + DiscoveryNodes nodes = new DiscoveryNodes.Builder().add(node0).clusterManagerNodeId(node0.getId()).build(); IndexMetadata indexMetadata = IndexMetadata.builder("index") .settings( Settings.builder() diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateResponseTests.java index 6455cd553f932..1cf5ecd9fde9e 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateResponseTests.java @@ -49,7 +49,7 @@ protected ClusterStateResponse createTestInstance() { if (randomBoolean()) { ClusterState.Builder clusterStateBuilder = ClusterState.builder(clusterName).version(randomNonNegativeLong()); if (randomBoolean()) { - clusterStateBuilder.nodes(DiscoveryNodes.builder().masterNodeId(randomAlphaOfLength(4)).build()); + clusterStateBuilder.nodes(DiscoveryNodes.builder().clusterManagerNodeId(randomAlphaOfLength(4)).build()); } clusterState = clusterStateBuilder.build(); } diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 930fe4ad6049d..bf43823f47079 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -284,7 +284,7 @@ void setClusterState(ClusterService clusterService, String index) { } } discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(newNode(numberOfNodes - 1).getId()); + discoBuilder.clusterManagerNodeId(newNode(numberOfNodes - 1).getId()); ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName(TEST_CLUSTER)); stateBuilder.nodes(discoBuilder); final IndexMetadata.Builder indexMetadata = IndexMetadata.builder(index) @@ -374,7 +374,7 @@ public void testRequestsAreNotSentToFailedClusterManager() { Request request = new Request(new String[] { TEST_INDEX }); PlainActionFuture listener = new PlainActionFuture<>(); - DiscoveryNode clusterManagerNode = clusterService.state().nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = clusterService.state().nodes().getClusterManagerNode(); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); builder.remove(clusterManagerNode.getId()); @@ -460,10 +460,10 @@ public void 
testResultAggregation() throws ExecutionException, InterruptedExcept final boolean simulateFailedClusterManagerNode = rarely(); DiscoveryNode failedClusterManagerNode = null; if (simulateFailedClusterManagerNode) { - failedClusterManagerNode = clusterService.state().nodes().getMasterNode(); + failedClusterManagerNode = clusterService.state().nodes().getClusterManagerNode(); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); builder.remove(failedClusterManagerNode.getId()); - builder.masterNodeId(null); + builder.clusterManagerNodeId(null); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); } diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index acfdf7982e9d0..f0b62914213c1 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -405,7 +405,7 @@ public void testDelegateToClusterManager() throws ExecutionException, Interrupte assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; - assertTrue(capturedRequest.node.isMasterNode()); + assertTrue(capturedRequest.node.isClusterManagerNode()); assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); @@ -432,7 +432,7 @@ public void testDelegateToFailingClusterManager() throws ExecutionException, Int CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = capturedRequests[0]; - assertTrue(capturedRequest.node.isMasterNode()); + assertTrue(capturedRequest.node.isClusterManagerNode()); assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); @@ -447,14 +447,14 @@ public void testDelegateToFailingClusterManager() throws ExecutionException, Int if (randomBoolean()) { // simulate cluster-manager node removal final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - nodesBuilder.masterNodeId(null); + nodesBuilder.clusterManagerNodeId(null); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); } if (randomBoolean()) { // reset the same state to increment a version simulating a join of an existing node // simulating use being disconnected final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - nodesBuilder.masterNodeId(clusterManagerNode.getId()); + nodesBuilder.clusterManagerNodeId(clusterManagerNode.getId()); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); } else { // simulate cluster-manager restart followed by a state recovery - this will reset the cluster state version @@ -466,7 +466,7 @@ public void testDelegateToFailingClusterManager() throws ExecutionException, Int clusterManagerNode.getVersion() ); nodesBuilder.add(clusterManagerNode); - nodesBuilder.masterNodeId(clusterManagerNode.getId()); + nodesBuilder.clusterManagerNodeId(clusterManagerNode.getId()); final 
ClusterState.Builder builder = ClusterState.builder(clusterService.state()).nodes(nodesBuilder); setState(clusterService, builder.version(0)); } @@ -474,7 +474,7 @@ public void testDelegateToFailingClusterManager() throws ExecutionException, Int capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); capturedRequest = capturedRequests[0]; - assertTrue(capturedRequest.node.isMasterNode()); + assertTrue(capturedRequest.node.isClusterManagerNode()); assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); } else if (failsWithConnectTransportException) { @@ -524,7 +524,7 @@ protected void masterOperation(Request request, ClusterState state, ActionListen assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; - assertTrue(capturedRequest.node.isMasterNode()); + assertTrue(capturedRequest.node.isClusterManagerNode()); assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); @@ -559,7 +559,7 @@ public void testDelegateToClusterManagerOnNodeWithDeprecatedMasterRole() throws assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; - assertTrue(capturedRequest.node.isMasterNode()); + assertTrue(capturedRequest.node.isClusterManagerNode()); assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 53ebceb25510a..86657a98d9a1d 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -226,7 +226,7 @@ public void setUp() throws Exception { discoveryNodes.add(node); } discoBuilder.localNodeId(randomFrom(discoveryNodes).getId()); - discoBuilder.masterNodeId(randomFrom(discoveryNodes).getId()); + discoBuilder.clusterManagerNodeId(randomFrom(discoveryNodes).getId()); ClusterState.Builder stateBuilder = ClusterState.builder(clusterService.getClusterName()); stateBuilder.nodes(discoBuilder); ClusterState clusterState = stateBuilder.build(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index b034b335bd9a3..98972d4063a97 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -142,7 +142,7 @@ public void setUp() throws Exception { Set roles = new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES); DiscoveryNode node1 = new DiscoveryNode("_name1", "_node1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("_name2", "_node2", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); - state.nodes(DiscoveryNodes.builder().add(node1).add(node2).localNodeId(node1.getId()).masterNodeId(node1.getId())); + 
state.nodes(DiscoveryNodes.builder().add(node1).add(node2).localNodeId(node1.getId()).clusterManagerNodeId(node1.getId())); shardId = new ShardId("index", UUID.randomUUID().toString(), 0); ShardRouting shardRouting = newShardRouting( diff --git a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java index 16f21a48d7ab8..df7c10cd1acd8 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java @@ -115,11 +115,11 @@ public void testLocalNodeIsClusterManager() { ClusterState previousState = createSimpleClusterState(); ClusterState newState = createState(numNodesInCluster, true, initialIndices); ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); - assertTrue("local node should be cluster-manager", event.localNodeMaster()); + assertTrue("local node should be cluster-manager", event.localNodeClusterManager()); newState = createState(numNodesInCluster, false, initialIndices); event = new ClusterChangedEvent("_na_", newState, previousState); - assertFalse("local node should not be cluster-manager", event.localNodeMaster()); + assertFalse("local node should not be cluster-manager", event.localNodeClusterManager()); } /** @@ -319,10 +319,10 @@ public void testLocalNodeIsClusterManagerWithDeprecatedMasterRole() { final DiscoveryNodes.Builder builderLocalIsMaster = DiscoveryNodes.builder(); final DiscoveryNode node0 = newNode("node_0", Set.of(DiscoveryNodeRole.MASTER_ROLE)); final DiscoveryNode node1 = newNode("node_1", Set.of(DiscoveryNodeRole.DATA_ROLE)); - builderLocalIsMaster.add(node0).add(node1).masterNodeId(node0.getId()).localNodeId(node0.getId()); + builderLocalIsMaster.add(node0).add(node1).clusterManagerNodeId(node0.getId()).localNodeId(node0.getId()); final DiscoveryNodes.Builder builderLocalNotMaster = DiscoveryNodes.builder(); - builderLocalNotMaster.add(node0).add(node1).masterNodeId(node0.getId()).localNodeId(node1.getId()); + builderLocalNotMaster.add(node0).add(node1).clusterManagerNodeId(node0.getId()).localNodeId(node1.getId()); ClusterState previousState = createSimpleClusterState(); final Metadata metadata = createMetadata(initialIndices); @@ -332,7 +332,7 @@ public void testLocalNodeIsClusterManagerWithDeprecatedMasterRole() { .routingTable(createRoutingTable(1, metadata)) .build(); ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); - assertTrue("local node should be master", event.localNodeMaster()); + assertTrue("local node should be master", event.localNodeClusterManager()); newState = ClusterState.builder(TEST_CLUSTER_NAME) .nodes(builderLocalNotMaster.build()) @@ -340,7 +340,7 @@ public void testLocalNodeIsClusterManagerWithDeprecatedMasterRole() { .routingTable(createRoutingTable(1, metadata)) .build(); event = new ClusterChangedEvent("_na_", newState, previousState); - assertFalse("local node should not be master", event.localNodeMaster()); + assertFalse("local node should not be master", event.localNodeClusterManager()); } private static class CustomMetadata2 extends TestCustomMetadata { @@ -476,7 +476,7 @@ private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boo Set roles = new HashSet<>(); if (i == 0) { // the cluster-manager node - builder.masterNodeId(nodeId); + builder.clusterManagerNodeId(nodeId); roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else if (i == 1) { // 
the alternate cluster-manager node diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 3155954d020a4..f5461054ca70c 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -90,15 +90,15 @@ public void testSupersedes() { ClusterState noClusterManager2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); ClusterState withClusterManager1a = ClusterState.builder(name) .version(randomInt(5)) - .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())) + .nodes(DiscoveryNodes.builder(nodes).clusterManagerNodeId(node1.getId())) .build(); ClusterState withClusterManager1b = ClusterState.builder(name) .version(randomInt(5)) - .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())) + .nodes(DiscoveryNodes.builder(nodes).clusterManagerNodeId(node1.getId())) .build(); ClusterState withClusterManager2 = ClusterState.builder(name) .version(randomInt(5)) - .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId())) + .nodes(DiscoveryNodes.builder(nodes).clusterManagerNodeId(node2.getId())) .build(); // states with no cluster-manager should never supersede anything @@ -871,7 +871,7 @@ private ClusterState buildClusterState() throws IOException { .stateUUID("stateUUID") .nodes( DiscoveryNodes.builder() - .masterNodeId("clusterManagerNodeId") + .clusterManagerNodeId("clusterManagerNodeId") .add(new DiscoveryNode("nodeId1", new TransportAddress(InetAddress.getByName("127.0.0.1"), 111), Version.CURRENT)) .build() ) diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 7f09f1eb8e2be..a989bacbf47bb 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -69,7 +69,9 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas public void testScheduling() { final DiscoveryNode discoveryNode = new DiscoveryNode("test", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNodes noClusterManager = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build(); - final DiscoveryNodes localClusterManager = DiscoveryNodes.builder(noClusterManager).masterNodeId(discoveryNode.getId()).build(); + final DiscoveryNodes localClusterManager = DiscoveryNodes.builder(noClusterManager) + .clusterManagerNodeId(discoveryNode.getId()) + .build(); final Settings.Builder settingsBuilder = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), discoveryNode.getName()); if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java index 2278d09722fe2..7325024138500 100644 --- a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java @@ -130,13 +130,13 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe }; PlainActionFuture fut1 = new PlainActionFuture<>(); - mua.updateMappingOnMaster(null, null, fut1); + 
diff --git a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java
index 2278d09722fe2..7325024138500 100644
--- a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java
+++ b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java
@@ -130,13 +130,13 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe
         };
 
         PlainActionFuture<Void> fut1 = new PlainActionFuture<>();
-        mua.updateMappingOnMaster(null, null, fut1);
+        mua.updateMappingOnClusterManager(null, null, fut1);
         assertEquals(1, inFlightListeners.size());
         assertEquals(0, mua.blockedThreads());
 
         PlainActionFuture<Void> fut2 = new PlainActionFuture<>();
         Thread thread = new Thread(() -> {
-            mua.updateMappingOnMaster(null, null, fut2); // blocked
+            mua.updateMappingOnClusterManager(null, null, fut2); // blocked
         });
         thread.start();
         assertBusy(() -> assertEquals(1, mua.blockedThreads()));
diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
index a61d878d90c7c..2138f28f7b8c2 100644
--- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
+++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
@@ -197,7 +197,7 @@ public void testSuccess() throws InterruptedException {
         assertEquals(shardEntry.shardId, shardRouting.shardId());
         assertEquals(shardEntry.allocationId, shardRouting.allocationId().getId());
         // sent to the cluster-manager
-        assertEquals(clusterService.state().nodes().getMasterNode().getId(), capturedRequests[0].node.getId());
+        assertEquals(clusterService.state().nodes().getClusterManagerNode().getId(), capturedRequests[0].node.getId());
 
         transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE);
 
@@ -211,7 +211,7 @@ public void testNoClusterManager() throws InterruptedException {
         setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
 
         DiscoveryNodes.Builder noClusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-        noClusterManagerBuilder.masterNodeId(null);
+        noClusterManagerBuilder.clusterManagerNodeId(null);
         setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noClusterManagerBuilder));
 
         CountDownLatch latch = new CountDownLatch(1);
@@ -504,7 +504,9 @@ private void setUpClusterManagerRetryVerification(
     ) {
         shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> {
             DiscoveryNodes.Builder clusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
-            clusterManagerBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId());
+            clusterManagerBuilder.clusterManagerNodeId(
+                clusterService.state().nodes().getClusterManagerNodes().iterator().next().value.getId()
+            );
             setState(clusterService, ClusterState.builder(clusterService.state()).nodes(clusterManagerBuilder));
         });
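The corresponding lookups on DiscoveryNodes are renamed too. A hedged sketch (assumes state is a ClusterState):

    DiscoveryNode clusterManager = state.nodes().getClusterManagerNode(); // was getMasterNode(); null if none elected
    String clusterManagerId = state.nodes().getClusterManagerNodeId();    // was getMasterNodeId()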
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
index b9ae08775d11a..81fbaf5fce8c0 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
@@ -170,7 +170,7 @@ public void testDoesNotElectNonClusterManagerNode() {
             cluster.stabilise();
 
             final ClusterNode leader = cluster.getAnyLeader();
-            assertTrue(leader.getLocalNode().isMasterNode());
+            assertTrue(leader.getLocalNode().isClusterManagerNode());
         }
     }
 
@@ -1729,7 +1729,7 @@ public void testDoesNotPerformElectionWhenRestartingFollower() {
             final ClusterNode leader = cluster.getAnyLeader();
             final long expectedTerm = leader.coordinator.getCurrentTerm();
 
-            if (cluster.clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).count() == 2) {
+            if (cluster.clusterNodes.stream().filter(n -> n.getLocalNode().isClusterManagerNode()).count() == 2) {
                 // in the 2-node case, auto-shrinking the voting configuration is required to reduce the voting configuration down to just
                 // the leader, otherwise restarting the other cluster-manager-eligible node triggers an election
                 leader.submitSetAutoShrinkVotingConfiguration(true);
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java
index d5947bf444d17..5bbbed5f4f8c0 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java
@@ -713,7 +713,7 @@ public void testPreferClusterManagerNodes() {
             .map(cr -> cr.node)
             .collect(Collectors.toList());
         List<DiscoveryNode> sortedFollowerTargets = new ArrayList<>(followerTargets);
-        Collections.sort(sortedFollowerTargets, Comparator.comparing(n -> n.isMasterNode() == false));
+        Collections.sort(sortedFollowerTargets, Comparator.comparing(n -> n.isClusterManagerNode() == false));
         assertEquals(sortedFollowerTargets, followerTargets);
     }
 
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
index fec1bb025d235..02e502e762561 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java
@@ -191,7 +191,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception {
                 DiscoveryNodes.builder()
                     .add(clusterManagerNode)
                     .localNodeId(clusterManagerNode.getId())
-                    .masterNodeId(clusterManagerNode.getId())
+                    .clusterManagerNodeId(clusterManagerNode.getId())
                     .add(bwcNode)
             )
             .build();
@@ -208,12 +208,12 @@ public void testUpdatesNodeWithNewRoles() throws Exception {
     }
 
     /**
-     * Validate isBecomeMasterTask() can identify "become cluster manager task" properly
+     * Validate isBecomeClusterManagerTask() can identify "become cluster manager task" properly
      */
     public void testIsBecomeClusterManagerTask() {
         JoinTaskExecutor.Task joinTaskOfMaster = JoinTaskExecutor.newBecomeMasterTask();
-        assertThat(joinTaskOfMaster.isBecomeMasterTask(), is(true));
+        assertThat(joinTaskOfMaster.isBecomeClusterManagerTask(), is(true));
         JoinTaskExecutor.Task joinTaskOfClusterManager = JoinTaskExecutor.newBecomeClusterManagerTask();
-        assertThat(joinTaskOfClusterManager.isBecomeMasterTask(), is(true));
+        assertThat(joinTaskOfClusterManager.isBecomeClusterManagerTask(), is(true));
     }
 }
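The FollowersChecker assertion above relies on cluster-manager-eligible nodes sorting first: comparing on the negated isClusterManagerNode() flag works because false orders before true. Sketch only (nodes is an assumed List<DiscoveryNode>):

    // cluster-manager-eligible nodes first, everything else after
    nodes.sort(Comparator.comparing(n -> n.isClusterManagerNode() == false));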
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java
index b06799312d99a..6a93f05803484 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java
@@ -453,7 +453,7 @@ public void testLeaderBehaviour() {
         final DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
             .add(localNode)
             .localNodeId(localNode.getId())
-            .masterNodeId(localNode.getId())
+            .clusterManagerNodeId(localNode.getId())
             .build();
 
         {
@@ -498,7 +498,7 @@ public void testLeaderBehaviour() {
         }
 
         {
-            leaderChecker.setCurrentNodes(DiscoveryNodes.builder(discoveryNodes).add(otherNode).masterNodeId(null).build());
+            leaderChecker.setCurrentNodes(DiscoveryNodes.builder(discoveryNodes).add(otherNode).clusterManagerNodeId(null).build());
 
             final CapturingTransportResponseHandler handler = new CapturingTransportResponseHandler();
             transportService.sendRequest(localNode, LEADER_CHECK_ACTION_NAME, new LeaderCheckRequest(otherNode), handler);
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java
index 806468ed5e761..8e81a3043426f 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java
@@ -336,17 +336,17 @@ public void testJoinWithHigherTermElectsLeader() {
             () -> new StatusInfo(HEALTHY, "healthy-info")
         );
         assertFalse(isLocalNodeElectedMaster());
-        assertNull(coordinator.getStateForClusterManagerService().nodes().getMasterNodeId());
+        assertNull(coordinator.getStateForClusterManagerService().nodes().getClusterManagerNodeId());
         long newTerm = initialTerm + randomLongBetween(1, 10);
         SimpleFuture fut = joinNodeAsync(
             new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)))
         );
         assertEquals(Coordinator.Mode.LEADER, coordinator.getMode());
-        assertNull(coordinator.getStateForClusterManagerService().nodes().getMasterNodeId());
+        assertNull(coordinator.getStateForClusterManagerService().nodes().getClusterManagerNodeId());
         deterministicTaskQueue.runAllRunnableTasks();
         assertTrue(fut.isDone());
         assertTrue(isLocalNodeElectedMaster());
-        assertTrue(coordinator.getStateForClusterManagerService().nodes().isLocalNodeElectedMaster());
+        assertTrue(coordinator.getStateForClusterManagerService().nodes().isLocalNodeElectedClusterManager());
     }
 
     public void testJoinWithHigherTermButBetterStateGetsRejected() {
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java
index 3e86ec11ae7b3..517456f54b785 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java
@@ -462,7 +462,7 @@ public void testPublishingToClusterManagersFirst() {
 
         List<DiscoveryNode> publicationTargets = new ArrayList<>(publication.pendingPublications.keySet());
         List<DiscoveryNode> sortedPublicationTargets = new ArrayList<>(publicationTargets);
-        Collections.sort(sortedPublicationTargets, Comparator.comparing(n -> n.isMasterNode() == false));
+        Collections.sort(sortedPublicationTargets, Comparator.comparing(n -> n.isClusterManagerNode() == false));
         assertEquals(sortedPublicationTargets, publicationTargets);
     }
 
diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java
index bd856d5c41ace..80e76d44b9c41 100644
--- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java
+++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java
@@ -144,7 +144,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte
         setState(
             clusterService,
             ClusterState.builder(clusterService.state())
-                .nodes(DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null))
+                .nodes(DiscoveryNodes.builder(clusterService.state().nodes()).clusterManagerNodeId(null))
                 .build()
         );
 
@@ -163,7 +163,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte
             .onNewClusterState(
                 "restore cluster-manager",
                 () -> ClusterState.builder(currentState)
-                    .nodes(DiscoveryNodes.builder(currentState.nodes()).masterNodeId(currentState.nodes().getLocalNodeId()))
+                    .nodes(DiscoveryNodes.builder(currentState.nodes()).clusterManagerNodeId(currentState.nodes().getLocalNodeId()))
                     .build(),
                 (source, e) -> {}
             );
@@ -219,7 +219,10 @@ public void testClusterHealth() throws IOException {
             ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices);
             logger.info("cluster status: {}, expected {}", clusterStateHealth.getStatus(), counter.status());
             clusterStateHealth = maybeSerialize(clusterStateHealth);
-            assertThat(clusterStateHealth.hasDiscoveredMaster(), equalTo(clusterService.state().nodes().getMasterNodeId() != null));
+            assertThat(
+                clusterStateHealth.hasDiscoveredClusterManager(),
+                equalTo(clusterService.state().nodes().getClusterManagerNodeId() != null)
+            );
             assertClusterHealth(clusterStateHealth, counter);
         }
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java
index 559dd86dce4b1..9a8ce63098744 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java
@@ -189,7 +189,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE
                 assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(is(in(preTable.getAllAllocationIds()))));
             } else {
                 // fake an election where conflicting nodes are removed and readded
-                state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build();
+                state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).clusterManagerNodeId(null).build()).build();
                 List<DiscoveryNode> conflictingNodes = randomSubsetOf(2, dataNodes);
 
                 unchangedNodeIds = dataNodes.stream()
@@ -214,7 +214,7 @@
                     nodesToAdd.add(createNode(DiscoveryNodeRole.DATA_ROLE));
                 }
 
-                state = cluster.joinNodesAndBecomeMaster(state, nodesToAdd);
+                state = cluster.joinNodesAndBecomeClusterManager(state, nodesToAdd);
                 postTable = state.routingTable().index("index").shard(0);
             }
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java
index 1e52fa380793e..d3e47c3e42f25 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java
@@ -367,7 +367,7 @@ public void clusterChanged(ClusterChangedEvent event) {
                         new DiscoveryNode("node1", "node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT)
                     )
                     .localNodeId("node1")
-                    .masterNodeId("node1")
+                    .clusterManagerNodeId("node1")
                     .build()
             )
             .metadata(metadata)
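The next file verifies that the renamed predicate DiscoveryNode.isClusterManagerNode() accepts both role spellings. Roughly (a sketch built from the test helpers seen elsewhere in this patch):

    // A node carrying the deprecated MASTER_ROLE still answers true to the renamed predicate,
    // as does one carrying the new CLUSTER_MANAGER_ROLE.
    DiscoveryNode node = new DiscoveryNode(
        "node_0",
        buildNewFakeTransportAddress(),
        emptyMap(),
        Set.of(DiscoveryNodeRole.MASTER_ROLE),
        Version.CURRENT
    );
    assert node.isClusterManagerNode(); // was isMasterNode()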
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java
index 55c518c62973e..630902f94d335 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleSettingTests.java
@@ -58,11 +58,11 @@ public void testIsIngestNode() {
     public void testIsMasterNode() {
         // It's used to add MASTER_ROLE into 'roleMap', because MASTER_ROLE is removed from DiscoveryNodeRole.BUILT_IN_ROLES in 2.0.
         DiscoveryNode.setAdditionalRoles(Collections.emptySet());
-        runRoleTest(DiscoveryNode::isMasterNode, DiscoveryNodeRole.MASTER_ROLE);
+        runRoleTest(DiscoveryNode::isClusterManagerNode, DiscoveryNodeRole.MASTER_ROLE);
     }
 
     public void testIsClusterManagerNode() {
-        runRoleTest(DiscoveryNode::isMasterNode, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
+        runRoleTest(DiscoveryNode::isClusterManagerNode, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE);
     }
 
     public void testIsRemoteClusterClient() {
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
index 80c7d8c9417fe..0d87981e60681 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
@@ -110,7 +110,7 @@ public void testAll() {
 
         final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
             .map(n -> n.value)
-            .filter(n -> n.isMasterNode() == false)
+            .filter(n -> n.isClusterManagerNode() == false)
             .map(DiscoveryNode::getId)
             .toArray(String[]::new);
         assertThat(discoveryNodes.resolveNodes("_all", "cluster_manager:false"), arrayContainingInAnyOrder(nonClusterManagerNodes));
@@ -123,13 +123,13 @@ public void testCoordinatorOnlyNodes() {
 
         final String[] coordinatorOnlyNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
             .map(n -> n.value)
-            .filter(n -> n.isDataNode() == false && n.isIngestNode() == false && n.isMasterNode() == false)
+            .filter(n -> n.isDataNode() == false && n.isIngestNode() == false && n.isClusterManagerNode() == false)
             .map(DiscoveryNode::getId)
             .toArray(String[]::new);
 
         final String[] nonCoordinatorOnlyNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
             .map(n -> n.value)
-            .filter(n -> n.isMasterNode() || n.isDataNode() || n.isIngestNode())
+            .filter(n -> n.isClusterManagerNode() || n.isDataNode() || n.isIngestNode())
             .map(DiscoveryNode::getId)
             .toArray(String[]::new);
 
@@ -179,11 +179,11 @@ public void testClusterManagersFirst() {
        final List<DiscoveryNode> inputNodes = randomNodes(10);
         final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
         inputNodes.forEach(discoBuilder::add);
-        final List<DiscoveryNode> returnedNodes = discoBuilder.build().mastersFirstStream().collect(Collectors.toList());
+        final List<DiscoveryNode> returnedNodes = discoBuilder.build().clusterManagersFirstStream().collect(Collectors.toList());
         assertEquals(returnedNodes.size(), inputNodes.size());
         assertEquals(new HashSet<>(returnedNodes), new HashSet<>(inputNodes));
         final List<DiscoveryNode> sortedNodes = new ArrayList<>(returnedNodes);
-        Collections.sort(sortedNodes, Comparator.comparing(n -> n.isMasterNode() == false));
+        Collections.sort(sortedNodes, Comparator.comparing(n -> n.isClusterManagerNode() == false));
         assertEquals(sortedNodes, returnedNodes);
     }
 
@@ -260,13 +260,13 @@ public void testDeltas() {
         DiscoveryNodes.Builder builderA = DiscoveryNodes.builder();
         nodesA.stream().forEach(builderA::add);
         final String clusterManagerAId = clusterManagerA == null ? null : clusterManagerA.getId();
-        builderA.masterNodeId(clusterManagerAId);
+        builderA.clusterManagerNodeId(clusterManagerAId);
         builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId());
 
         DiscoveryNodes.Builder builderB = DiscoveryNodes.builder();
         nodesB.stream().forEach(builderB::add);
         final String clusterManagerBId = clusterManagerB == null ? null : clusterManagerB.getId();
-        builderB.masterNodeId(clusterManagerBId);
+        builderB.clusterManagerNodeId(clusterManagerBId);
         builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId());
 
         final DiscoveryNodes discoNodesA = builderA.build();
@@ -282,15 +282,15 @@ public void testDeltas() {
             assertThat(delta.previousClusterManagerNode().getId(), equalTo(clusterManagerAId));
         }
         if (clusterManagerB == null) {
-            assertThat(delta.newMasterNode(), nullValue());
+            assertThat(delta.newClusterManagerNode(), nullValue());
         } else {
-            assertThat(delta.newMasterNode().getId(), equalTo(clusterManagerBId));
+            assertThat(delta.newClusterManagerNode().getId(), equalTo(clusterManagerBId));
         }
 
         if (Objects.equals(clusterManagerAId, clusterManagerBId)) {
-            assertFalse(delta.masterNodeChanged());
+            assertFalse(delta.clusterManagerNodeChanged());
         } else {
-            assertTrue(delta.masterNodeChanged());
+            assertTrue(delta.clusterManagerNodeChanged());
         }
 
         Set<DiscoveryNode> newNodes = new HashSet<>(nodesB);
@@ -316,13 +316,13 @@ public void testDeprecatedMasterNodeFilter() {
 
         final String[] clusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
             .map(n -> n.value)
-            .filter(n -> n.isMasterNode() == true)
+            .filter(n -> n.isClusterManagerNode() == true)
             .map(DiscoveryNode::getId)
             .toArray(String[]::new);
 
         final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false)
             .map(n -> n.value)
-            .filter(n -> n.isMasterNode() == false)
+            .filter(n -> n.isClusterManagerNode() == false)
             .map(DiscoveryNode::getId)
             .toArray(String[]::new);
 
@@ -366,7 +366,7 @@ private static DiscoveryNodes buildDiscoveryNodes() {
             discoBuilder = discoBuilder.add(node);
         }
         discoBuilder.localNodeId(randomFrom(nodesList).getId());
-        discoBuilder.masterNodeId(randomFrom(nodesList).getId());
+        discoBuilder.clusterManagerNodeId(randomFrom(nodesList).getId());
         return discoBuilder.build();
     }
 
@@ -384,7 +384,7 @@ Set<String> matchingNodeIds(DiscoveryNodes nodes) {
         ELECTED_MASTER("_master") {
             @Override
             Set<String> matchingNodeIds(DiscoveryNodes nodes) {
-                return Collections.singleton(nodes.getMasterNodeId());
+                return Collections.singleton(nodes.getClusterManagerNodeId());
             }
         },
         // TODO: Remove this element after removing DiscoveryNodeRole.MASTER_ROLE
@@ -392,7 +392,7 @@ Set<String> matchingNodeIds(DiscoveryNodes nodes) {
             @Override
             Set<String> matchingNodeIds(DiscoveryNodes nodes) {
                 Set<String> ids = new HashSet<>();
-                nodes.getMasterNodes().keysIt().forEachRemaining(ids::add);
+                nodes.getClusterManagerNodes().keysIt().forEachRemaining(ids::add);
                 return ids;
             }
         },
@@ -400,7 +400,7 @@ Set<String> matchingNodeIds(DiscoveryNodes nodes) {
             @Override
             Set<String> matchingNodeIds(DiscoveryNodes nodes) {
                 Set<String> ids = new HashSet<>();
-                nodes.getMasterNodes().keysIt().forEachRemaining(ids::add);
+                nodes.getClusterManagerNodes().keysIt().forEachRemaining(ids::add);
                 return ids;
             }
         },
@@ -495,7 +495,7 @@ public void testMaxMinNodeVersion() {
             )
         );
         discoBuilder.localNodeId("name_1");
-        discoBuilder.masterNodeId("name_2");
+        discoBuilder.clusterManagerNodeId("name_2");
         DiscoveryNodes build = discoBuilder.build();
         assertEquals(Version.fromString("1.1.0"), build.getMaxNodeVersion());
         assertEquals(LegacyESVersion.fromString("5.1.0"), build.getMinNodeVersion());
@@ -523,7 +523,7 @@ public void testNodeExistsWithBWCVersion() {
         discoBuilder.add(node_2);
 
         discoBuilder.localNodeId("name_1");
-        discoBuilder.masterNodeId("name_2");
+        discoBuilder.clusterManagerNodeId("name_2");
         DiscoveryNodes build = discoBuilder.build();
         assertTrue(build.nodeExistsWithBWCVersion(buildDiscoveryNodeFromExisting(node_1, Version.CURRENT)));
         assertFalse(build.nodeExistsWithBWCVersion(buildDiscoveryNodeFromExisting(node_2, LegacyESVersion.fromString("7.10.2"))));
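The DiscoveryNodes.Delta accessors follow suit. A hedged sketch of detecting a leadership change between the two node sets built in testDeltas() above:

    DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA);
    if (delta.clusterManagerNodeChanged()) {                         // was masterNodeChanged()
        DiscoveryNode previous = delta.previousClusterManagerNode(); // may be null
        DiscoveryNode elected = delta.newClusterManagerNode();       // was newMasterNode(); may be null
    }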
diff --git a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java
index 2f6d34e1eb204..241ee3fca3553 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java
@@ -237,7 +237,10 @@ public void testNotifiesOnFailure() throws InterruptedException {
                     clusterService.getClusterApplierService().onNewClusterState("simulated", () -> {
                         ClusterState state = clusterService.state();
                         return ClusterState.builder(state)
-                            .nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(randomBoolean() ? null : state.nodes().getLocalNodeId()))
+                            .nodes(
+                                DiscoveryNodes.builder(state.nodes())
+                                    .clusterManagerNodeId(randomBoolean() ? null : state.nodes().getLocalNodeId())
+                            )
                             .build();
                     }, (source, e) -> {});
                 }
diff --git a/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java
index 635dfe409058a..a3878f5cbeb25 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java
@@ -108,7 +108,7 @@ public void testNoDelayedUnassigned() throws Exception {
             .routingTable(RoutingTable.builder().addAsNew(metadata.index("test")).build())
             .build();
         clusterState = ClusterState.builder(clusterState)
-            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").clusterManagerNodeId("node1"))
             .build();
         clusterState = allocationService.reroute(clusterState, "reroute");
         // starting primaries
@@ -154,7 +154,7 @@ public void testDelayedUnassignedScheduleReroute() throws Exception {
             .routingTable(RoutingTable.builder().addAsNew(metadata.index("test")).build())
             .build();
         clusterState = ClusterState.builder(clusterState)
-            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").clusterManagerNodeId("node1"))
             .build();
         final long baseTimestampNanos = System.nanoTime();
         allocationService.setNanoTimeOverride(baseTimestampNanos);
@@ -264,7 +264,7 @@ public void testDelayedUnassignedScheduleRerouteAfterDelayedReroute() throws Exc
                 DiscoveryNodes.builder()
                     .add(newNode("node0", singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)))
                     .localNodeId("node0")
-                    .masterNodeId("node0")
+                    .clusterManagerNodeId("node0")
                     .add(newNode("node1"))
                     .add(newNode("node2"))
                     .add(newNode("node3"))
@@ -449,7 +449,7 @@ public void testDelayedUnassignedScheduleRerouteRescheduledOnShorterDelay() thro
                     .add(newNode("node3"))
                     .add(newNode("node4"))
                     .localNodeId("node1")
-                    .masterNodeId("node1")
+                    .clusterManagerNodeId("node1")
             )
             .build();
         final long nodeLeftTimestampNanos = System.nanoTime();
@@ -556,7 +556,7 @@ private TestDelayAllocationService(ThreadPool threadPool, ClusterService cluster
         }
 
         @Override
-        protected void assertClusterOrMasterStateThread() {
+        protected void assertClusterOrClusterManagerStateThread() {
             // do not check this in the unit tests
         }
 
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java
index 1a047b3ccd9da..4c0bdb2c900c7 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java
@@ -201,7 +201,7 @@ private ClusterState createInitialClusterState() {
         DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
             .add(newNode("cluster-manager", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)))
             .localNodeId("cluster-manager")
-            .masterNodeId("cluster-manager")
+            .clusterManagerNodeId("cluster-manager")
             .build();
 
         ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
@@ -230,7 +230,7 @@ private Decision executeAllocation(final ClusterState clusterState, final ShardR
         if (randomBoolean()) {
             decision = decider.canAllocate(shardRouting, allocation);
         } else {
-            DiscoveryNode node = clusterState.getNodes().getMasterNode();
+            DiscoveryNode node = clusterState.getNodes().getClusterManagerNode();
             decision = decider.canAllocate(shardRouting, new RoutingNode(node.getId(), node), allocation);
         }
         return decision;
diff --git a/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java
index 83e99475dc6a4..05a9fb272fbac 100644
--- a/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java
+++ b/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java
@@ -85,7 +85,7 @@ public void testClusterStateSerialization() throws Exception {
             .add(newNode("node2"))
             .add(newNode("node3"))
             .localNodeId("node1")
-            .masterNodeId("node2")
+            .clusterManagerNodeId("node2")
             .build();
 
         ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1"))
diff --git a/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java b/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java
index b2e9b8ddb9ea7..f16a1eda2f34d 100644
--- a/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java
+++ b/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java
@@ -67,7 +67,7 @@ public void testClusterStateSerialization() throws Exception {
         DiscoveryNodes nodes = DiscoveryNodes.builder()
             .add(new DiscoveryNode("node_foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT))
             .localNodeId("node_foo")
-            .masterNodeId("node_foo")
+            .clusterManagerNodeId("node_foo")
             .build();
 
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
index a603dac9f42ca..830a7d2a557f2 100644
--- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java
@@ -119,7 +119,7 @@ private TimedClusterApplierService createTimedClusterService(boolean makeCluster
                     DiscoveryNodes.builder()
                         .add(localNode)
                         .localNodeId(localNode.getId())
-                        .masterNodeId(makeClusterManager ? localNode.getId() : null)
+                        .clusterManagerNodeId(makeClusterManager ? localNode.getId() : null)
                 )
                 .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
                 .build()
@@ -297,7 +297,7 @@ public void testLocalNodeClusterManagerListenerCallbacks() {
         TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false);
 
         AtomicBoolean isClusterManager = new AtomicBoolean();
-        timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeClusterManagerListener() {
+        timedClusterApplierService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() {
             @Override
             public void onMaster() {
                 isClusterManager.set(true);
@@ -311,20 +311,20 @@ public void offMaster() {
 
         ClusterState state = timedClusterApplierService.state();
         DiscoveryNodes nodes = state.nodes();
-        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
+        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).clusterManagerNodeId(nodes.getLocalNodeId());
         state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
         setState(timedClusterApplierService, state);
         assertThat(isClusterManager.get(), is(true));
 
         nodes = state.nodes();
-        nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null);
+        nodesBuilder = DiscoveryNodes.builder(nodes).clusterManagerNodeId(null);
         state = ClusterState.builder(state)
             .blocks(ClusterBlocks.builder().addGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES))
             .nodes(nodesBuilder)
             .build();
         setState(timedClusterApplierService, state);
         assertThat(isClusterManager.get(), is(false));
 
-        nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
+        nodesBuilder = DiscoveryNodes.builder(nodes).clusterManagerNodeId(nodes.getLocalNodeId());
         state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
         setState(timedClusterApplierService, state);
         assertThat(isClusterManager.get(), is(true));
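Listener registration is renamed as well, while the onMaster()/offMaster() callback names are left untouched in this PR. Sketch (assumes a ClusterApplierService instance):

    clusterApplierService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() {
        @Override
        public void onMaster() {
            // the local node was just elected cluster-manager
        }

        @Override
        public void offMaster() {
            // the local node stopped being the cluster-manager
        }
    });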
diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java
index c829bef576be5..8827064974a19 100644
--- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java
@@ -136,7 +136,7 @@ private MasterService createClusterManagerService(boolean makeClusterManager) {
             DiscoveryNodes.builder()
                 .add(localNode)
                 .localNodeId(localNode.getId())
-                .masterNodeId(makeClusterManager ? localNode.getId() : null)
+                .clusterManagerNodeId(makeClusterManager ? localNode.getId() : null)
         )
         .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
         .build();
diff --git a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
index 307edc2f03075..fda1e0022c754 100644
--- a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
+++ b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
@@ -167,7 +167,10 @@ void assertNoClusterManager(final String node, @Nullable final ClusterBlock expe
         assertBusy(() -> {
             ClusterState state = getNodeClusterState(node);
             final DiscoveryNodes nodes = state.nodes();
-            assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as cluster-manager", nodes.getMasterNode());
+            assertNull(
+                "node [" + node + "] still has [" + nodes.getClusterManagerNode() + "] as cluster-manager",
+                nodes.getClusterManagerNode()
+            );
             if (expectedBlocks != null) {
                 for (ClusterBlockLevel level : expectedBlocks.levels()) {
                     assertTrue(
@@ -183,10 +186,10 @@ void assertDifferentClusterManager(final String node, final String oldClusterMan
         assertBusy(() -> {
             ClusterState state = getNodeClusterState(node);
             String clusterManagerNode = null;
-            if (state.nodes().getMasterNode() != null) {
-                clusterManagerNode = state.nodes().getMasterNode().getName();
+            if (state.nodes().getClusterManagerNode() != null) {
+                clusterManagerNode = state.nodes().getClusterManagerNode().getName();
             }
-            logger.trace("[{}] cluster-manager is [{}]", node, state.nodes().getMasterNode());
+            logger.trace("[{}] cluster-manager is [{}]", node, state.nodes().getClusterManagerNode());
             assertThat(
                 "node [" + node + "] still has [" + clusterManagerNode + "] as cluster-manager",
                 oldClusterManagerNode,
@@ -201,7 +204,9 @@ void assertClusterManager(String clusterManagerNode, List<String> nodes) throws
             ClusterState state = getNodeClusterState(node);
             String failMsgSuffix = "cluster_state:\n" + state;
             assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size()));
-            String otherClusterManagerNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null;
+            String otherClusterManagerNodeName = state.nodes().getClusterManagerNode() != null
+                ? state.nodes().getClusterManagerNode().getName()
+                : null;
             assertThat(
                 "wrong cluster-manager on node [" + node + "]. " + failMsgSuffix,
                 otherClusterManagerNodeName,
diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java
index a274760872e88..bcf41ebf65a04 100644
--- a/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java
+++ b/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java
@@ -97,12 +97,12 @@ public void testPeersResponseEqualsHashCodeSerialization() {
                 final long term = in.getTerm();
                 if (randomBoolean()) {
                     return new PeersResponse(
-                        in.getMasterNode(),
+                        in.getClusterManagerNode(),
                         in.getKnownPeers(),
                         randomValueOtherThan(term, OpenSearchTestCase::randomNonNegativeLong)
                     );
                 } else {
-                    if (in.getMasterNode().isPresent()) {
+                    if (in.getClusterManagerNode().isPresent()) {
                         if (randomBoolean()) {
                             return new PeersResponse(Optional.of(createNode(randomAlphaOfLength(10))), in.getKnownPeers(), term);
                         } else {
@@ -112,7 +112,7 @@ public void testPeersResponseEqualsHashCodeSerialization() {
                         if (randomBoolean()) {
                             return new PeersResponse(Optional.of(createNode(randomAlphaOfLength(10))), emptyList(), term);
                         } else {
-                            return new PeersResponse(in.getMasterNode(), modifyDiscoveryNodesList(in.getKnownPeers(), false), term);
+                            return new PeersResponse(in.getClusterManagerNode(), modifyDiscoveryNodesList(in.getKnownPeers(), false), term);
                         }
                     }
                 }
diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java
index 2f78e60631ec2..5e7dede0309c6 100644
--- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java
+++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java
@@ -140,7 +140,7 @@ public void run() {
             for (final Map.Entry<TransportAddress, DiscoveryNode> addressAndNode : reachableNodes.entrySet()) {
                 if (addressAndNode.getKey().equals(transportAddress)) {
                     final DiscoveryNode discoveryNode = addressAndNode.getValue();
-                    if (discoveryNode.isMasterNode()) {
+                    if (discoveryNode.isClusterManagerNode()) {
                         disconnectedNodes.remove(discoveryNode);
                         connectedNodes.add(discoveryNode);
                         assertTrue(inFlightConnectionAttempts.remove(transportAddress));
@@ -477,7 +477,7 @@ public void testRespondsToRequestWhenActive() {
         peerFinder.activate(lastAcceptedNodes);
 
         final PeersResponse peersResponse1 = peerFinder.handlePeersRequest(new PeersRequest(sourceNode, Collections.emptyList()));
-        assertFalse(peersResponse1.getMasterNode().isPresent());
+        assertFalse(peersResponse1.getClusterManagerNode().isPresent());
         assertThat(peersResponse1.getKnownPeers(), empty()); // sourceNode is not yet known
         assertThat(peersResponse1.getTerm(), is(0L));
 
@@ -488,7 +488,7 @@ public void testRespondsToRequestWhenActive() {
         final long updatedTerm = randomNonNegativeLong();
         peerFinder.setCurrentTerm(updatedTerm);
         final PeersResponse peersResponse2 = peerFinder.handlePeersRequest(new PeersRequest(sourceNode, Collections.emptyList()));
-        assertFalse(peersResponse2.getMasterNode().isPresent());
+        assertFalse(peersResponse2.getClusterManagerNode().isPresent());
         assertThat(peersResponse2.getKnownPeers(), contains(sourceNode));
         assertThat(peersResponse2.getTerm(), is(updatedTerm));
     }
@@ -531,7 +531,7 @@ public PeersResponse read(StreamInput in) throws IOException {
             @Override
             public void handleResponse(PeersResponse response) {
                 assertTrue(responseReceived.compareAndSet(false, true));
-                assertFalse(response.getMasterNode().isPresent());
+                assertFalse(response.getClusterManagerNode().isPresent());
                 assertThat(response.getKnownPeers(), empty()); // sourceNode is not yet known
                 assertThat(response.getTerm(), is(0L));
             }
diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java
index 51ba096a86ae0..ba0268b627b95 100644
--- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java
+++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java
@@ -135,7 +135,7 @@ public void testRecoverStateUpdateTask() throws Exception {
             nodeId
         );
         ClusterState stateWithBlock = ClusterState.builder(ClusterName.DEFAULT)
-            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(clusterManagerNode).build())
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).clusterManagerNodeId(nodeId).add(clusterManagerNode).build())
            .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build())
             .build();
 
diff --git a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java
index 00ca35207620d..33a5bc5671894 100644
--- a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java
+++ b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java
@@ -188,7 +188,7 @@ private DiscoveryNodes.Builder generateDiscoveryNodes(boolean clusterManagerElig
             .add(newNode("node1", clusterManagerEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles))
             .add(newNode("cluster_manager_node", CLUSTER_MANAGER_DATA_ROLES))
             .localNodeId("node1")
-            .masterNodeId(clusterManagerEligible ? "node1" : "cluster_manager_node");
+            .clusterManagerNodeId(clusterManagerEligible ? "node1" : "cluster_manager_node");
     }
 
     private IndexMetadata createIndexMetadata(String name) {
diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
index 00bdb87cdecf7..918fc91e7076f 100644
--- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
@@ -2680,7 +2680,7 @@ public void testSeqNoAndCheckpoints() throws IOException, InterruptedException {
             );
 
             ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier();
-            gcpTracker.updateFromMaster(
+            gcpTracker.updateFromClusterManager(
                 1L,
                 new HashSet<>(Collections.singletonList(primary.allocationId().getId())),
                 new IndexShardRoutingTable.Builder(shardId).addShard(primary).build()
@@ -2695,7 +2695,7 @@ public void testSeqNoAndCheckpoints() throws IOException, InterruptedException {
                 );
                 countDownLatch.await();
             }
-            gcpTracker.updateFromMaster(
+            gcpTracker.updateFromClusterManager(
                 2L,
                 new HashSet<>(Collections.singletonList(primary.allocationId().getId())),
                 new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(initializingReplica).build()
@@ -2703,7 +2703,7 @@ public void testSeqNoAndCheckpoints() throws IOException, InterruptedException {
             gcpTracker.initiateTracking(initializingReplica.allocationId().getId());
             gcpTracker.markAllocationIdAsInSync(initializingReplica.allocationId().getId(), replicaLocalCheckpoint);
             final ShardRouting replica = initializingReplica.moveToStarted();
-            gcpTracker.updateFromMaster(
+            gcpTracker.updateFromClusterManager(
                 3L,
                 new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())),
                 new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build()
diff --git a/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java
index 053385b023667..e53a3b09a5eb6 100644
--- a/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java
@@ -99,7 +99,7 @@ public void testNoopAfterRegularEngine() throws IOException {
             allocationId
         );
         IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
-        tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table);
+        tracker.updateFromClusterManager(1L, Collections.singleton(allocationId.getId()), table);
         tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
         for (int i = 0; i < docs; i++) {
             ParsedDocument doc = testParsedDocument("" + i, null, testDocumentWithTextField(), B_1, null);
@@ -203,7 +203,7 @@ public void testTrimUnreferencedTranslogFiles() throws Exception {
             allocationId
         );
         IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
-        tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table);
+        tracker.updateFromClusterManager(1L, Collections.singleton(allocationId.getId()), table);
         tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
         engine.onSettingsChanged(TimeValue.MINUS_ONE, ByteSizeValue.ZERO, randomNonNegativeLong());
         final int numDocs = scaledRandomIntBetween(10, 3000);
diff --git a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java
index 5a8b04bce24ca..b97b537d4280c 100644
--- a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java
@@ -95,7 +95,7 @@ public void setUpReplicationTracker() throws InterruptedException {
             (leases, listener) -> {},
             () -> safeCommitInfo
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             1L,
             Collections.singleton(primaryAllocationId.getId()),
             routingTable(Collections.emptySet(), primaryAllocationId)
@@ -107,7 +107,7 @@ public void setUpReplicationTracker() throws InterruptedException {
             Collections.singleton(replicaAllocationId),
             primaryAllocationId
         );
-        replicationTracker.updateFromMaster(2L, Collections.singleton(primaryAllocationId.getId()), routingTableWithReplica);
+        replicationTracker.updateFromClusterManager(2L, Collections.singleton(primaryAllocationId.getId()), routingTableWithReplica);
         replicationTracker.addPeerRecoveryRetentionLease(
             routingTableWithReplica.getByAllocationId(replicaAllocationId.getId()).currentNodeId(),
             randomCheckpoint(),
@@ -127,7 +127,7 @@ private void startReplica() {
         final IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(replicationTracker.routingTable);
         builder.removeShard(replicaShardRouting);
         builder.addShard(replicaShardRouting.moveToStarted());
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             replicationTracker.appliedClusterStateVersion + 1,
             replicationTracker.routingTable.shards().stream().map(sr -> sr.allocationId().getId()).collect(Collectors.toSet()),
             builder.build()
diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
index d94c1b919ad71..fc584ef90b54a 100644
--- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
@@ -86,7 +86,7 @@ public void testAddOrRenewRetentionLease() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -134,7 +134,7 @@ public void testAddDuplicateRetentionLease() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -166,7 +166,7 @@ public void testRenewNotFoundRetentionLease() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -205,7 +205,7 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() {
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
         reference.set(replicationTracker);
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -243,7 +243,7 @@ public void testRemoveRetentionLease() {
            (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -308,7 +308,7 @@ public void testCloneRetentionLease() {
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
         replicationTrackerRef.set(replicationTracker);
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -353,7 +353,7 @@ public void testCloneNonexistentRetentionLease() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -382,7 +382,7 @@ public void testCloneDuplicateRetentionLease() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -415,7 +415,7 @@ public void testRemoveNotFound() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -454,7 +454,7 @@ public void testRemoveRetentionLeaseCausesRetentionLeaseSync() {
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
         reference.set(replicationTracker);
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -506,7 +506,7 @@ private void runExpirationTest(final boolean primaryMode) {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -585,7 +585,7 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -638,7 +638,7 @@ public void testLoadAndPersistRetentionLeases() throws IOException {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -673,7 +673,7 @@ public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -726,7 +726,7 @@ public void testPersistRetentionLeasesUnderConcurrency() throws IOException {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
@@ -790,7 +790,7 @@ public void testRenewLeaseWithLowerRetainingSequenceNumber() throws Exception {
             (leases, listener) -> {},
             OPS_BASED_RECOVERY_ALWAYS_REASONABLE
         );
-        replicationTracker.updateFromMaster(
+        replicationTracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(allocationId.getId()),
             routingTable(Collections.emptySet(), allocationId)
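Every updateFromMaster call site in these tracker tests moves to updateFromClusterManager with an identical argument list. The typical hand-off being exercised, as a sketch (tracker is a ReplicationTracker; the other values are as in the tests):

    // The primary receives the cluster-state version, in-sync allocation IDs and routing table
    // computed by the elected cluster-manager, then starts operating in primary mode.
    tracker.updateFromClusterManager(clusterStateVersion, inSyncAllocationIds, routingTable); // was updateFromMaster(...)
    tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);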
diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java
index 8fd8449108333..66c484cd40cce 100644
--- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java
@@ -137,7 +137,7 @@ public void testGlobalCheckpointUpdate() {
             logger.info("  - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type);
         });
 
-        tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
+        tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1));
         initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED));
@@ -167,7 +167,7 @@ public void testGlobalCheckpointUpdate() {
 
         Set<AllocationId> newInitializing = new HashSet<>(initializing);
         newInitializing.add(extraId);
-        tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId));
+        tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId));
 
         addPeerRecoveryRetentionLease(tracker, extraId);
         tracker.initiateTracking(extraId.getId());
@@ -208,7 +208,7 @@ public void testMarkAllocationIdAsInSync() throws Exception {
         final AllocationId primaryId = active.iterator().next();
         final AllocationId replicaId = initializing.iterator().next();
         final ReplicationTracker tracker = newTracker(primaryId);
-        tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
+        tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
         final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1);
         tracker.activatePrimaryMode(localCheckpoint);
         addPeerRecoveryRetentionLease(tracker, replicaId);
@@ -249,7 +249,7 @@ public void testMissingActiveIdsPreventAdvance() {
         assigned.putAll(initializing);
         AllocationId primaryId = active.keySet().iterator().next();
         final ReplicationTracker tracker = newTracker(primaryId);
-        tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
+        tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         randomSubsetOf(initializing.keySet()).forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED));
         final AllocationId missingActiveID = randomFrom(active.keySet());
@@ -275,7 +275,7 @@ public void testMissingInSyncIdsPreventAdvance() {
         AllocationId primaryId = active.keySet().iterator().next();
         final ReplicationTracker tracker = newTracker(primaryId);
-        tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
+        tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach(
             aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)
@@ -298,7 +298,7 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManager() {
         final Map<AllocationId, Long> nonApproved = randomAllocationsWithLocalCheckpoints(1, 5);
         final AllocationId primaryId = active.keySet().iterator().next();
         final ReplicationTracker tracker = newTracker(primaryId);
-        tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
+        tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         initializing.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED));
         nonApproved.keySet()
@@ -335,7 +335,7 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManager() {
             allocations.putAll(initializingToBeRemoved);
         }
         final ReplicationTracker tracker = newTracker(primaryId);
-        tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
+        tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         if (randomBoolean()) {
             initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED));
@@ -348,7 +348,7 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManager() {
 
         // now remove shards
         if (randomBoolean()) {
-            tracker.updateFromMaster(
+            tracker.updateFromClusterManager(
                 initialClusterStateVersion + 1,
                 ids(activeToStay.keySet()),
                 routingTable(initializingToStay.keySet(), primaryId)
@@ -356,7 +356,7 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManager() {
             allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L));
         } else {
             allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L));
-            tracker.updateFromMaster(
+            tracker.updateFromClusterManager(
                 initialClusterStateVersion + 2,
                 ids(activeToStay.keySet()),
                 routingTable(initializingToStay.keySet(), primaryId)
@@ -378,7 +378,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception {
         final AllocationId trackingAllocationId = AllocationId.newInitializing();
         final ReplicationTracker tracker = newTracker(inSyncAllocationId);
         final long clusterStateVersion = randomNonNegativeLong();
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             clusterStateVersion,
             Collections.singleton(inSyncAllocationId.getId()),
             routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId)
@@ -422,7 +422,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception {
             assertTrue(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId()).inSync);
         } else {
             // cluster-manager changes its mind and cancels the allocation
-            tracker.updateFromMaster(
+            tracker.updateFromClusterManager(
                 clusterStateVersion + 1,
                 Collections.singleton(inSyncAllocationId.getId()),
                 routingTable(emptySet(), inSyncAllocationId)
@@ -449,7 +449,7 @@ public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBar
         final AllocationId inSyncAllocationId = AllocationId.newInitializing();
         final AllocationId trackingAllocationId = AllocationId.newInitializing();
         final ReplicationTracker tracker = newTracker(inSyncAllocationId);
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(inSyncAllocationId.getId()),
             routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId)
@@ -505,7 +505,7 @@ public void testUpdateAllocationIdsFromClusterManager() throws Exception {
         AllocationId primaryId = activeAllocationIds.iterator().next();
         IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId);
         final ReplicationTracker tracker = newTracker(primaryId);
-        tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable);
+        tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable);
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds)));
         assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable));
@@ -539,7 +539,7 @@ public void testUpdateAllocationIdsFromClusterManager() throws Exception {
             .filter(a -> !removingInitializingAllocationIds.contains(a))
             .collect(Collectors.toSet());
         routingTable = routingTable(newInitializingAllocationIds, primaryId);
-        tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable);
+        tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable);
         assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync));
         assertTrue(removingActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null));
         assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync));
@@ -555,7 +555,7 @@ public void testUpdateAllocationIdsFromClusterManager() throws Exception {
          * than we have been using above ensures that we can not collide with a previous allocation ID
         */
         newInitializingAllocationIds.add(AllocationId.newInitializing());
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             initialClusterStateVersion + 2,
             ids(newActiveAllocationIds),
             routingTable(newInitializingAllocationIds, primaryId)
@@ -607,7 +607,7 @@ public void testUpdateAllocationIdsFromClusterManager() throws Exception {
         // using a different length than we have been using above ensures that we can not collide with a previous allocation ID
         final AllocationId newSyncingAllocationId = AllocationId.newInitializing();
         newInitializingAllocationIds.add(newSyncingAllocationId);
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             initialClusterStateVersion + 3,
             ids(newActiveAllocationIds),
             routingTable(newInitializingAllocationIds, primaryId)
@@ -649,7 +649,7 @@ public void testUpdateAllocationIdsFromClusterManager() throws Exception {
          * the in-sync set even if we receive a cluster state update that does not reflect this.
         *
         */
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             initialClusterStateVersion + 4,
             ids(newActiveAllocationIds),
             routingTable(newInitializingAllocationIds, primaryId)
@@ -678,7 +678,7 @@ public void testRaceUpdatingGlobalCheckpoint() throws InterruptedException, Brok
 
         final int activeLocalCheckpoint = randomIntBetween(0, Integer.MAX_VALUE - 1);
         final ReplicationTracker tracker = newTracker(active);
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(active.getId()),
             routingTable(Collections.singleton(initializing), active)
@@ -907,7 +907,7 @@ public void testIllegalStateExceptionIfUnknownAllocationId() {
         final AllocationId active = AllocationId.newInitializing();
         final AllocationId initializing = AllocationId.newInitializing();
         final ReplicationTracker tracker = newTracker(active);
-        tracker.updateFromMaster(
+        tracker.updateFromClusterManager(
             randomNonNegativeLong(),
             Collections.singleton(active.getId()),
             routingTable(Collections.singleton(initializing), active)
@@ -938,7 +938,7 @@ public Set<AllocationId> initializingIds() {
         }
 
         public void apply(ReplicationTracker gcp) {
-            gcp.updateFromMaster(version, ids(inSyncIds), routingTable);
+            gcp.updateFromClusterManager(version, ids(inSyncIds), routingTable);
         }
     }
 
@@ -1114,7 +1114,7 @@ public void testPeerRecoveryRetentionLeaseCreationAndRenewal() {
         tracker.updateRetentionLeasesOnReplica(new RetentionLeases(randomNonNegativeLong(), randomNonNegativeLong(), initialLeases));
 
         IndexShardRoutingTable routingTable = routingTable(initializingAllocationIds, primaryId);
-        tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable);
+        tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable);
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
         assertTrue(
             "primary's retention lease should exist",
@@ -1184,7 +1184,7 @@ public void testPeerRecoveryRetentionLeaseCreationAndRenewal() {
         routingTable = routingTableBuilder.build();
         activeAllocationIds.addAll(initializingAllocationIds);
-        tracker.updateFromMaster(initialClusterStateVersion + randomLongBetween(1, 10), ids(activeAllocationIds), routingTable);
+        tracker.updateFromClusterManager(initialClusterStateVersion + randomLongBetween(1, 10), ids(activeAllocationIds), routingTable);
 
         assertAsTimePasses.accept(() -> {
             // Leases still don't expire
diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java
index c37a610b28bc9..769fdc220bec4 100644
--- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java
+++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java
@@ -397,15 +397,21 @@ public ClusterState addNodes(ClusterState clusterState, List<DiscoveryNode> node
         );
     }
 
-    public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List<DiscoveryNode> nodes) {
+    public ClusterState joinNodesAndBecomeClusterManager(ClusterState clusterState, List<DiscoveryNode> nodes) {
         List<JoinTaskExecutor.Task> joinNodes = new ArrayList<>();
-        joinNodes.add(JoinTaskExecutor.newBecomeMasterTask());
+        joinNodes.add(JoinTaskExecutor.newBecomeClusterManagerTask());
         joinNodes.add(JoinTaskExecutor.newFinishElectionTask());
         joinNodes.addAll(nodes.stream().map(node -> new JoinTaskExecutor.Task(node, "dummy reason")).collect(Collectors.toList()));
 
         return runTasks(joinTaskExecutor, clusterState, joinNodes);
     }
 
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #joinNodesAndBecomeClusterManager(ClusterState, List)} */
+    @Deprecated
+    public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List<DiscoveryNode> nodes) {
+        return joinNodesAndBecomeClusterManager(clusterState, nodes);
+    }
+
     public ClusterState removeNodes(ClusterState clusterState, List<DiscoveryNode> nodes) {
         return runTasks(
             nodeRemovalExecutor,
diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
index b686641eef6c3..903bdccef5587 100644
--- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
+++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -495,7 +495,7 @@ public ClusterState randomlyUpdateClusterState(
             // remove node
             if (state.nodes().getDataNodes().size() > 3) {
                 DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
-                if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
+                if (discoveryNode.equals(state.nodes().getClusterManagerNode()) == false) {
                     state = cluster.removeNodes(state, Collections.singletonList(discoveryNode));
                     updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                 }
diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java
index 40ffa2eeb0aff..7e23e6ef3748c 100644
--- a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java
+++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java
@@ -153,7 +153,7 @@ public void testReassignmentRequiredOnMetadataChanges() {
         DiscoveryNodes nodes = DiscoveryNodes.builder()
             .add(new DiscoveryNode("_node",
buildNewFakeTransportAddress(), Version.CURRENT)) .localNodeId("_node") - .masterNodeId("_node") + .clusterManagerNodeId("_node") .build(); boolean unassigned = randomBoolean(); @@ -536,7 +536,7 @@ public void testPeriodicRecheckOffClusterManager() { builder = ClusterState.builder(clusterState); nodes = DiscoveryNodes.builder(clusterState.nodes()); nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_cluster_manager_node")); - nodes.masterNodeId("a_new_cluster_manager_node"); + nodes.clusterManagerNodeId("a_new_cluster_manager_node"); ClusterState nonMasterClusterState = builder.nodes(nodes).build(); event = new ClusterChangedEvent("test", nonMasterClusterState, clusterState); service.clusterChanged(event); @@ -554,7 +554,7 @@ public void testUnassignTask() { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_1", buildNewFakeTransportAddress(), Version.CURRENT)) .localNodeId("_node_1") - .masterNodeId("_node_1") + .clusterManagerNodeId("_node_1") .add(new DiscoveryNode("_node_2", buildNewFakeTransportAddress(), Version.CURRENT)); String unassignedId = addTask(tasks, "unassign", "_node_2"); @@ -579,7 +579,7 @@ public void testUnassignNonExistentTask() { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_1", buildNewFakeTransportAddress(), Version.CURRENT)) .localNodeId("_node_1") - .masterNodeId("_node_1") + .clusterManagerNodeId("_node_1") .add(new DiscoveryNode("_node_2", buildNewFakeTransportAddress(), Version.CURRENT)); Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); @@ -902,7 +902,7 @@ private ClusterState initialState() { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "this_node")); nodes.localNodeId("this_node"); - nodes.masterNodeId("this_node"); + nodes.clusterManagerNodeId("this_node"); return ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).metadata(metadata).routingTable(routingTable.build()).build(); } diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java index 96b33153ccf31..2b1ca6092a088 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java @@ -330,7 +330,7 @@ public void testDisassociateDeadNodes_givenAssignedPersistentTask() { DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) .localNodeId("node1") - .masterNodeId("node1") + .clusterManagerNodeId("node1") .build(); String taskName = "test/task"; @@ -358,7 +358,7 @@ public void testDisassociateDeadNodes() { DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) .localNodeId("node1") - .masterNodeId("node1") + .clusterManagerNodeId("node1") .build(); String taskName = "test/task"; diff --git a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java index f0e283e6dde6d..483b5c268d3f9 100644 --- a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java +++ 
b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java @@ -495,7 +495,7 @@ private ClusterState demoteClusterManagerNode(final ClusterState currentState) { assertThat(currentState.nodes().get(node.getId()), nullValue()); return ClusterState.builder(currentState) - .nodes(DiscoveryNodes.builder(currentState.nodes()).add(node).masterNodeId(node.getId())) + .nodes(DiscoveryNodes.builder(currentState.nodes()).add(node).clusterManagerNodeId(node.getId())) .build(); } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 7e0f977e6e835..e20fd347fc3fd 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1342,13 +1342,13 @@ private void startCluster() { testClusterNodes.nodes.values() .stream() .map(n -> n.node) - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .map(DiscoveryNode::getId) .collect(Collectors.toSet()) ); testClusterNodes.nodes.values() .stream() - .filter(n -> n.node.isMasterNode()) + .filter(n -> n.node.isClusterManagerNode()) .forEach(testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); // Connect all nodes to each other testClusterNodes.nodes.values() @@ -1377,7 +1377,7 @@ private void stabilize() { .map(node -> node.clusterService.state()) .collect(Collectors.toList()); final Set clusterManagerNodeIds = clusterStates.stream() - .map(clusterState -> clusterState.nodes().getMasterNodeId()) + .map(clusterState -> clusterState.nodes().getClusterManagerNodeId()) .collect(Collectors.toSet()); final Set terms = clusterStates.stream().map(ClusterState::term).collect(Collectors.toSet()); final List versions = clusterStates.stream().map(ClusterState::version).distinct().collect(Collectors.toList()); @@ -1537,7 +1537,7 @@ public Optional randomClusterManagerNode() { // Select from sorted list of data-nodes here to not have deterministic behaviour final List clusterManagerNodes = testClusterNodes.nodes.values() .stream() - .filter(n -> n.node.isMasterNode()) + .filter(n -> n.node.isClusterManagerNode()) .filter(n -> disconnectedNodes.contains(n.node.getName()) == false) .sorted(Comparator.comparing(n -> n.node.getName())) .collect(Collectors.toList()); @@ -1608,9 +1608,9 @@ public DiscoveryNodes discoveryNodes() { * @return Cluster Manager Node */ public TestClusterNode currentClusterManager(ClusterState state) { - TestClusterNode clusterManager = nodes.get(state.nodes().getMasterNode().getName()); + TestClusterNode clusterManager = nodes.get(state.nodes().getClusterManagerNode().getName()); assertNotNull(clusterManager); - assertTrue(clusterManager.node.isMasterNode()); + assertTrue(clusterManager.node.isClusterManagerNode()); return clusterManager; } @@ -2204,7 +2204,7 @@ public void start(ClusterState initialState) { () -> persistedState, hostsResolver -> nodes.values() .stream() - .filter(n -> n.node.isMasterNode()) + .filter(n -> n.node.isClusterManagerNode()) .map(n -> n.node.getAddress()) .collect(Collectors.toList()), clusterService.getClusterApplierService(), diff --git a/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java index 18f8f4e584748..5089368776ad6 100644 --- 
a/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java @@ -107,7 +107,7 @@ public static ClusterState state( unassignedNodes.add(node.getId()); } discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures + discoBuilder.clusterManagerNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures final int primaryTerm = 1 + randomInt(200); IndexMetadata indexMetadata = IndexMetadata.builder(index) .settings( @@ -198,7 +198,7 @@ public static ClusterState state(String index, final int numberOfNodes, final in nodes.add(node.getId()); } discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(randomFrom(nodes)); + discoBuilder.clusterManagerNodeId(randomFrom(nodes)); IndexMetadata indexMetadata = IndexMetadata.builder(index) .settings( Settings.builder() @@ -240,7 +240,7 @@ public static ClusterState state(final int numberOfNodes, final String[] indices nodes.add(node.getId()); } discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(newNode(0).getId()); + discoBuilder.clusterManagerNodeId(newNode(0).getId()); Metadata.Builder metadata = Metadata.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); List nodesList = new ArrayList<>(nodes); @@ -291,7 +291,7 @@ public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, discoBuilder = discoBuilder.add(node); } discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures + discoBuilder.clusterManagerNodeId(newNode(1).getId()); // we need a non-local cluster-manager to test shard failures IndexMetadata indexMetadata = IndexMetadata.builder(index) .settings( Settings.builder() @@ -332,7 +332,7 @@ public static ClusterState stateWithAssignedPrimariesAndReplicas(String[] indice discoBuilder = discoBuilder.add(node); } discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(newNode(numberOfDataNodes + 1).getId()); + discoBuilder.clusterManagerNodeId(newNode(numberOfDataNodes + 1).getId()); ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); state.nodes(discoBuilder); Builder routingTableBuilder = RoutingTable.builder(); @@ -417,7 +417,7 @@ public static ClusterState stateWithActivePrimary( public static ClusterState stateWithNoShard() { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); discoBuilder.localNodeId(newNode(0).getId()); - discoBuilder.masterNodeId(newNode(1).getId()); + discoBuilder.clusterManagerNodeId(newNode(1).getId()); ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); state.nodes(discoBuilder); state.metadata(Metadata.builder().generateClusterUuidIfNeeded()); @@ -439,7 +439,7 @@ public static ClusterState state(DiscoveryNode localNode, DiscoveryNode clusterM discoBuilder.add(node); } if (clusterManagerNode != null) { - discoBuilder.masterNodeId(clusterManagerNode.getId()); + discoBuilder.clusterManagerNodeId(clusterManagerNode.getId()); } discoBuilder.localNodeId(localNode.getId()); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 
19551b0adecb2..36469b2ee1999 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -311,7 +311,7 @@ class Cluster implements Releasable { nodeHealthService ); clusterNodes.add(clusterNode); - if (clusterNode.getLocalNode().isMasterNode()) { + if (clusterNode.getLocalNode().isClusterManagerNode()) { clusterManagerEligibleNodeIds.add(clusterNode.getId()); } } @@ -617,7 +617,7 @@ void stabilise(long stabilisationDurationMillis) { assertTrue(nodeId + " has been bootstrapped", clusterNode.coordinator.isInitialConfigurationSet()); assertThat( nodeId + " has correct cluster-manager", - clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), + clusterNode.getLastAppliedClusterState().nodes().getClusterManagerNode(), equalTo(leader.getLocalNode()) ); assertThat( @@ -634,7 +634,7 @@ void stabilise(long stabilisationDurationMillis) { assertThat(nodeId + " is not following " + leaderId, clusterNode.coordinator.getMode(), is(CANDIDATE)); assertThat( nodeId + " has no cluster-manager", - clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), + clusterNode.getLastAppliedClusterState().nodes().getClusterManagerNode(), nullValue() ); assertThat( @@ -784,7 +784,7 @@ boolean nodeExists(DiscoveryNode node) { ClusterNode getAnyBootstrappableNode() { return randomFrom( clusterNodes.stream() - .filter(n -> n.getLocalNode().isMasterNode()) + .filter(n -> n.getLocalNode().isClusterManagerNode()) .filter(n -> initialConfiguration.getNodeIds().contains(n.getLocalNode().getId())) .collect(Collectors.toList()) ); @@ -899,7 +899,8 @@ class MockPersistedState implements CoordinationState.PersistedState { final long persistedCurrentTerm; if ( // node is cluster-manager-ineligible either before or after the restart ... - (oldState.getLastAcceptedState().nodes().getLocalNode().isMasterNode() && newLocalNode.isMasterNode()) == false + (oldState.getLastAcceptedState().nodes().getLocalNode().isClusterManagerNode() + && newLocalNode.isClusterManagerNode()) == false // ... and it's accepted some non-initial state so we can roll back ... && (oldState.getLastAcceptedState().term() > 0L || oldState.getLastAcceptedState().version() > 0L) // ... and we're feeling lucky ... @@ -1194,7 +1195,9 @@ ClusterNode restartedNode( address.getAddress(), address, Collections.emptyMap(), - localNode.isMasterNode() && DiscoveryNode.isMasterNode(nodeSettings) ? DiscoveryNodeRole.BUILT_IN_ROLES : emptySet(), + localNode.isClusterManagerNode() && DiscoveryNode.isClusterManagerNode(nodeSettings) + ? 
DiscoveryNodeRole.BUILT_IN_ROLES + : emptySet(), Version.CURRENT ); return new ClusterNode( @@ -1291,7 +1294,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { // in this case, we know for sure that event was not processed by the system and will not change history // remove event to help avoid bloated history and state space explosion in linearizability checker history.remove(eventId); @@ -1346,9 +1349,9 @@ public void onFailure(String source, Exception e) { } @Override - public void onNoLongerMaster(String source) { + public void onNoLongerClusterManager(String source) { logger.trace("no longer cluster-manager: [{}]", source); - taskListener.onNoLongerMaster(source); + taskListener.onNoLongerClusterManager(source); } @Override @@ -1427,7 +1430,7 @@ void applyInitialConfiguration() { } private boolean isNotUsefullyBootstrapped() { - return getLocalNode().isMasterNode() == false || coordinator.isInitialConfigurationSet() == false; + return getLocalNode().isClusterManagerNode() == false || coordinator.isInitialConfigurationSet() == false; } void allowClusterStateApplicationFailure() { diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java index ca4a33fa677c6..5ef7fb192b054 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java @@ -148,7 +148,7 @@ static class ClusterNode { } void reboot() { - if (localNode.isMasterNode() == false && rarely()) { + if (localNode.isClusterManagerNode() == false && rarely()) { // cluster-manager-ineligible nodes can't be trusted to persist the cluster state properly, // but will not lose the fact that they were bootstrapped final CoordinationMetadata.VotingConfiguration votingConfiguration = persistedState.getLastAcceptedState() diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index 89d5f6f95bc5a..50158c6ecf053 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -414,7 +414,9 @@ private static ClusterService mockClusterService(ClusterState initialState) { final DiscoveryNode localNode = new DiscoveryNode("", buildNewFakeTransportAddress(), Version.CURRENT); final AtomicReference currentState = new AtomicReference<>( ClusterState.builder(initialState) - .nodes(DiscoveryNodes.builder().add(localNode).masterNodeId(localNode.getId()).localNodeId(localNode.getId()).build()) + .nodes( + DiscoveryNodes.builder().add(localNode).clusterManagerNodeId(localNode.getId()).localNodeId(localNode.getId()).build() + ) .build() ); when(clusterService.state()).then(invocationOnMock -> currentState.get()); diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index 99f9b86fb6479..55ae70df9892c 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ 
b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -79,7 +79,7 @@ public static MasterService createMasterService(ThreadPool threadPool, ClusterSt public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) - .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) + .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); return createMasterService(threadPool, initialClusterState); @@ -160,7 +160,7 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); clusterService.setNodeConnectionsService(createNoOpNodeConnectionsService()); ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) - .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) + .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); clusterService.getClusterApplierService().setInitialState(initialClusterState); diff --git a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java index 6dec5858b398a..33147447bd469 100644 --- a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java @@ -148,7 +148,7 @@ public ExternalTestCluster( if (DiscoveryNode.isDataNode(nodeInfo.getSettings())) { dataNodes++; clusterManagerAndDataNodes++; - } else if (DiscoveryNode.isMasterNode(nodeInfo.getSettings())) { + } else if (DiscoveryNode.isClusterManagerNode(nodeInfo.getSettings())) { clusterManagerAndDataNodes++; } } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 93f0bf8cb6190..7124cc6a180ac 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -199,11 +199,11 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - private static final Predicate NO_DATA_NO_CLUSTER_MANAGER_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode( - nodeAndClient.node.settings() - ) == false && DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false; + private static final Predicate NO_DATA_NO_CLUSTER_MANAGER_PREDICATE = nodeAndClient -> DiscoveryNode + .isClusterManagerNode(nodeAndClient.node.settings()) == false + && DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false; - private static final Predicate CLUSTER_MANAGER_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.isMasterNode( + private static final Predicate CLUSTER_MANAGER_NODE_PREDICATE = nodeAndClient -> DiscoveryNode.isClusterManagerNode( nodeAndClient.node.settings() ); @@ -792,13 +792,13 @@ private static String getRoleSuffix(Settings settings) { String suffix = ""; // only add the suffixes if roles are explicitly defined if 
(settings.hasValue("nodes.roles")) { - if (DiscoveryNode.isMasterNode(settings)) { + if (DiscoveryNode.isClusterManagerNode(settings)) { suffix = suffix + DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleNameAbbreviation(); } if (DiscoveryNode.isDataNode(settings)) { suffix = suffix + DiscoveryNodeRole.DATA_ROLE.roleNameAbbreviation(); } - if (!DiscoveryNode.isMasterNode(settings) && !DiscoveryNode.isDataNode(settings)) { + if (!DiscoveryNode.isClusterManagerNode(settings) && !DiscoveryNode.isDataNode(settings)) { suffix = suffix + "c"; } } @@ -936,7 +936,7 @@ public String getName() { } public boolean isMasterEligible() { - return DiscoveryNode.isMasterNode(node.settings()); + return DiscoveryNode.isClusterManagerNode(node.settings()); } Client client() { @@ -1164,7 +1164,7 @@ private synchronized void reset(boolean wipeData) throws IOException { int autoBootstrapClusterManagerNodeIndex = -1; final List clusterManagerNodeNames = settings.stream() - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .map(Node.NODE_NAME_SETTING::get) .collect(Collectors.toList()); @@ -1222,7 +1222,10 @@ public synchronized void validateClusterFormed() { .collect(Collectors.toList()); final String debugString = ", expected nodes: " + expectedNodes + " and actual cluster states " + states; // all nodes have a cluster-manager - assertTrue("Missing cluster-manager" + debugString, states.stream().allMatch(cs -> cs.nodes().getMasterNodeId() != null)); + assertTrue( + "Missing cluster-manager" + debugString, + states.stream().allMatch(cs -> cs.nodes().getClusterManagerNodeId() != null) + ); // all nodes have the same cluster-manager (in same term) assertEquals( "Not all cluster-managers in same term" + debugString, @@ -1742,7 +1745,7 @@ private void rebuildUnicastHostFiles(List newNodes) { .filter(Objects::nonNull) .map(TransportService::getLocalNode) .filter(Objects::nonNull) - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .map(n -> n.getAddress().toString()) .distinct() .collect(Collectors.toList()); @@ -1964,7 +1967,7 @@ public String getMasterName() { public String getMasterName(@Nullable String viaNode) { try { Client client = viaNode != null ? 
client(viaNode) : client(); - return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(); + return client.admin().cluster().prepareState().get().getState().nodes().getClusterManagerNode().getName(); } catch (Exception e) { logger.warn("Can't fetch cluster state", e); throw new RuntimeException("Can't get cluster-manager node " + e.getMessage(), e); @@ -2022,7 +2025,7 @@ private List bootstrapClusterManagerNodeWithSpecifiedIndex(List newSettings = new ArrayList<>(); for (Settings settings : allNodesSettings) { - if (DiscoveryNode.isMasterNode(settings) == false) { + if (DiscoveryNode.isClusterManagerNode(settings) == false) { newSettings.add(settings); } else { currentNodeId++; @@ -2032,13 +2035,13 @@ private List bootstrapClusterManagerNodeWithSpecifiedIndex(List nodeNames = new ArrayList<>(); for (Settings nodeSettings : getDataOrMasterNodeInstances(Settings.class)) { - if (DiscoveryNode.isMasterNode(nodeSettings)) { + if (DiscoveryNode.isClusterManagerNode(nodeSettings)) { nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings)); } } for (Settings nodeSettings : allNodesSettings) { - if (DiscoveryNode.isMasterNode(nodeSettings)) { + if (DiscoveryNode.isClusterManagerNode(nodeSettings)) { nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings)); } } @@ -2097,7 +2100,7 @@ public List startNodes(int numOfNodes, Settings settings) { * Starts multiple nodes with the given settings and returns their names */ public synchronized List startNodes(Settings... extraSettings) { - final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isMasterNode).count()); + final int newClusterManagerCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isClusterManagerNode).count()); final int defaultMinClusterManagerNodes; if (autoManageClusterManagerNodes) { defaultMinClusterManagerNodes = getMinClusterManagerNodes(getClusterManagerNodesCount() + newClusterManagerCount); @@ -2110,7 +2113,7 @@ public synchronized List startNodes(Settings... extraSettings) { && prevClusterManagerCount == 0 && newClusterManagerCount > 0 && Arrays.stream(extraSettings) - .allMatch(s -> DiscoveryNode.isMasterNode(s) == false || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s))) + .allMatch(s -> DiscoveryNode.isClusterManagerNode(s) == false || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s))) ? RandomNumbers.randomIntBetween(random, 0, newClusterManagerCount - 1) : -1; @@ -2123,7 +2126,7 @@ public synchronized List startNodes(Settings... extraSettings) { nextNodeId.set(firstNodeId + numOfNodes); final List initialClusterManagerNodes = settings.stream() - .filter(DiscoveryNode::isMasterNode) + .filter(DiscoveryNode::isClusterManagerNode) .map(Node.NODE_NAME_SETTING::get) .collect(Collectors.toList()); @@ -2132,7 +2135,7 @@ public synchronized List startNodes(Settings... 
extraSettings) { for (int i = 0; i < numOfNodes; i++) { final Settings nodeSettings = updatedSettings.get(i); final Builder builder = Settings.builder(); - if (DiscoveryNode.isMasterNode(nodeSettings)) { + if (DiscoveryNode.isClusterManagerNode(nodeSettings)) { if (autoBootstrapClusterManagerNodeIndex == 0) { builder.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), initialClusterManagerNodes); } @@ -2176,7 +2179,7 @@ private static int getMinClusterManagerNodes(int eligibleClusterManagerNodes) { } private int getClusterManagerNodesCount() { - return (int) nodes.values().stream().filter(n -> DiscoveryNode.isMasterNode(n.node().settings())).count(); + return (int) nodes.values().stream().filter(n -> DiscoveryNode.isClusterManagerNode(n.node().settings())).count(); } public String startClusterManagerOnlyNode() { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 76c401a82646f..6344fe3435675 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -1100,7 +1100,7 @@ protected void ensureClusterStateConsistency() throws IOException { clusterManagerClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry); Map clusterManagerStateMap = convertToMap(clusterManagerClusterState); int clusterManagerClusterStateSize = clusterManagerClusterState.toString().length(); - String clusterManagerId = clusterManagerClusterState.nodes().getMasterNodeId(); + String clusterManagerId = clusterManagerClusterState.nodes().getClusterManagerNodeId(); for (Client client : cluster().getClients()) { ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); @@ -1112,7 +1112,7 @@ protected void ensureClusterStateConsistency() throws IOException { // that the cluster-manager node matches the cluster-manager (otherwise there is no requirement for the cluster state to // match) if (clusterManagerClusterState.version() == localClusterState.version() - && clusterManagerId.equals(localClusterState.nodes().getMasterNodeId())) { + && clusterManagerId.equals(localClusterState.nodes().getClusterManagerNodeId())) { try { assertEquals( "cluster state UUID does not match", diff --git a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java index 262008507753e..a0b392f3fa669 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java @@ -439,7 +439,7 @@ public Path nodeConfigPath(int nodeOrdinal) { for (String name : cluster.getNodeNames()) { DiscoveryNode node = cluster.getInstance(ClusterService.class, name).localNode(); List paths = Arrays.stream(getNodePaths(cluster, name)).map(Path::toString).collect(Collectors.toList()); - if (node.isMasterNode()) { + if (node.isClusterManagerNode()) { result.computeIfAbsent(clusterManagerRole, k -> new HashSet<>()).addAll(paths); } else if (node.isDataNode()) { result.computeIfAbsent(DiscoveryNodeRole.DATA_ROLE, k -> new HashSet<>()).addAll(paths); From 29adf2aaaae25306d8368c779563fd763c9e4bc3 Mon Sep 17 00:00:00 2001 From: Sarat 
Vemulapalli Date: Wed, 20 Jul 2022 16:02:31 -0700 Subject: [PATCH 025/104] Upgrading Joda version (#3935) * Updating Joda Signed-off-by: Sarat Vemulapalli * Fixing Tests Signed-off-by: Sarat Vemulapalli * update SHAs Signed-off-by: Sarat Vemulapalli --- buildSrc/version.properties | 2 +- server/licenses/joda-time-2.10.12.jar.sha1 | 1 - server/licenses/joda-time-2.10.13.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 server/licenses/joda-time-2.10.12.jar.sha1 create mode 100644 server/licenses/joda-time-2.10.13.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 75514f9738190..5c2f5c86101d2 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -22,7 +22,7 @@ asm = 9.3 jna = 5.5.0 netty = 4.1.79.Final -joda = 2.10.12 +joda = 2.10.13 # client dependencies httpclient = 4.5.13 diff --git a/server/licenses/joda-time-2.10.12.jar.sha1 b/server/licenses/joda-time-2.10.12.jar.sha1 deleted file mode 100644 index 538f23152f69d..0000000000000 --- a/server/licenses/joda-time-2.10.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95b3f193ad0493d94dcd7daa9ea575c30e6be5f5 \ No newline at end of file diff --git a/server/licenses/joda-time-2.10.13.jar.sha1 b/server/licenses/joda-time-2.10.13.jar.sha1 new file mode 100644 index 0000000000000..4155902a4135d --- /dev/null +++ b/server/licenses/joda-time-2.10.13.jar.sha1 @@ -0,0 +1 @@ +86f338c18cea2a89005556642e81707ff920dd38 \ No newline at end of file From 8b5a10c15873922076520a35589973788a181486 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Thu, 21 Jul 2022 13:59:41 +0000 Subject: [PATCH 026/104] Fix possible flaky test for testBeforeIndexShardClosed_CancelsOngoingReplications() (#3963) * Fixing flaky test failure happening for testShardAlreadyReplicating() Signed-off-by: Rishikesh1159 --- .../indices/replication/SegmentReplicationTargetService.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index c44b27911bb7a..0c3350f224b11 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -172,6 +172,11 @@ public void run() { private void start(final long replicationId) { try (ReplicationRef replicationRef = onGoingReplications.get(replicationId)) { + // This check is for handling edge cases where the reference is removed before the ReplicationRunner is started by the + // threadpool. 
+ if (replicationRef == null) { + return; + } replicationRef.get().startReplication(new ActionListener<>() { @Override public void onResponse(Void o) { From bb593d6e7c3d057caf5cedd891930dd6caccad9a Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Thu, 21 Jul 2022 14:29:48 +0000 Subject: [PATCH 027/104] [Segment Replication] Checkpoint Replay on Replica Shard (#3658) * Adding Latest Received checkpoint, replay checkpoint logic along with tests Signed-off-by: Rishikesh1159 --- .../SegmentReplicationTargetService.java | 28 ++++++++++++++- .../SegmentReplicationTargetServiceTests.java | 35 +++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 0c3350f224b11..f9b40d14b0d53 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -31,6 +31,8 @@ import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportService; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; /** @@ -49,6 +51,8 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final SegmentReplicationSourceFactory sourceFactory; + private final Map<ShardId, ReplicationCheckpoint> latestReceivedCheckpoint = new HashMap<>(); + /** * The internal actions * @@ -91,6 +95,15 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { + + // If a received checkpoint is already present for this shard and the new one is ahead of it, replace the old one + if (latestReceivedCheckpoint.get(replicaShard.shardId()) != null) { + if (receivedCheckpoint.isAheadOf(latestReceivedCheckpoint.get(replicaShard.shardId()))) { + latestReceivedCheckpoint.replace(replicaShard.shardId(), receivedCheckpoint); + } + } else { + latestReceivedCheckpoint.put(replicaShard.shardId(), receivedCheckpoint); + } if (onGoingReplications.isShardReplicating(replicaShard.shardId())) { logger.trace( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", replicaShard.getLatestReplicationCheckpoint() ) ); return; } + final Thread thread = Thread.currentThread(); if (replicaShard.shouldProcessCheckpoint(receivedCheckpoint)) { startReplication(receivedCheckpoint, replicaShard, new SegmentReplicationListener() { @Override - public void onReplicationDone(SegmentReplicationState state) {} + public void onReplicationDone(SegmentReplicationState state) { + // if we received a checkpoint during the copy event that is ahead of this + // try and process it. + if (latestReceivedCheckpoint.get(replicaShard.shardId()).isAheadOf(replicaShard.getLatestReplicationCheckpoint())) { + Runnable runnable = () -> onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + // Checks if we are using same thread and forks if necessary.
+ if (thread == Thread.currentThread()) { + threadPool.generic().execute(runnable); + } else { + runnable.run(); + } + } + } @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 7ff6e3dceabc9..8b4bda7de50ad 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -34,6 +34,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.eq; public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { @@ -203,6 +204,40 @@ public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOExc closeShard(indexShard, false); } + public void testReplicationOnDone() throws IOException { + SegmentReplicationTargetService spy = spy(sut); + IndexShard spyShard = spy(indexShard); + ReplicationCheckpoint cp = indexShard.getLatestReplicationCheckpoint(); + ReplicationCheckpoint newCheckpoint = new ReplicationCheckpoint( + cp.getShardId(), + cp.getPrimaryTerm(), + cp.getSegmentsGen(), + cp.getSeqNo(), + cp.getSegmentInfosVersion() + 1 + ); + ReplicationCheckpoint anotherNewCheckpoint = new ReplicationCheckpoint( + cp.getShardId(), + cp.getPrimaryTerm(), + cp.getSegmentsGen(), + cp.getSeqNo(), + cp.getSegmentInfosVersion() + 2 + ); + ArgumentCaptor<SegmentReplicationTargetService.SegmentReplicationListener> captor = ArgumentCaptor.forClass( + SegmentReplicationTargetService.SegmentReplicationListener.class + ); + doNothing().when(spy).startReplication(any(), any(), any()); + spy.onNewCheckpoint(newCheckpoint, spyShard); + spy.onNewCheckpoint(anotherNewCheckpoint, spyShard); + verify(spy, times(1)).startReplication(eq(newCheckpoint), any(), captor.capture()); + verify(spy, times(1)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); + SegmentReplicationTargetService.SegmentReplicationListener listener = captor.getValue(); + listener.onDone(new SegmentReplicationState(new ReplicationLuceneIndex())); + doNothing().when(spy).onNewCheckpoint(any(), any()); + verify(spy, timeout(0).times(2)).onNewCheckpoint(eq(anotherNewCheckpoint), any()); + closeShard(indexShard, false); + + } + public void testBeforeIndexShardClosed_CancelsOngoingReplications() { final SegmentReplicationTarget target = new SegmentReplicationTarget( checkpoint, From b08a2b87ac5ec54b9a6bca4db8f0c57dc3de4431 Mon Sep 17 00:00:00 2001 From: Matt Weber Date: Thu, 21 Jul 2022 11:57:36 -0500 Subject: [PATCH 028/104] Make HybridDirectory MMAP Extensions Configurable (#3837) * Make HybridDirectory MMAP Extensions Configurable Make the HybridDirectory's list of mmap extensions configurable via index settings instead of being hard-coded to a specific set. Set defaults to the list of currently hard-coded values for backwards compatibility. Signed-off-by: Matt Weber * Add javadoc to INDEX_STORE_HYBRID_MMAP_EXTENSIONS.
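The replay logic added in PATCH 027 above reduces to a per-shard latest-wins cache plus a post-copy replay check. The following standalone sketch captures that flow under simplified assumptions: a plain String shard key and a version-counter Checkpoint stand in for the OpenSearch ShardId and ReplicationCheckpoint types, and the thread-pool fork shown in the diff is elided.

import java.util.HashMap;
import java.util.Map;

class CheckpointReplaySketch {
    static final class Checkpoint {
        final long version;

        Checkpoint(long version) {
            this.version = version;
        }

        // A checkpoint is ahead of a missing or older one.
        boolean isAheadOf(Checkpoint other) {
            return other == null || version > other.version;
        }
    }

    private final Map<String, Checkpoint> latestReceived = new HashMap<>();

    // Always remember the newest checkpoint seen per shard, even while a copy is in flight.
    synchronized void onNewCheckpoint(String shardKey, Checkpoint received) {
        if (received.isAheadOf(latestReceived.get(shardKey))) {
            latestReceived.put(shardKey, received);
        }
        // ... start replication toward 'received' if the shard is idle; otherwise the
        // cached checkpoint is picked up by onReplicationDone below.
    }

    // When a replication round completes, replay if something newer arrived mid-copy.
    void onReplicationDone(String shardKey, Checkpoint nowOnReplica) {
        Checkpoint latest = latestReceived.get(shardKey);
        if (latest != null && latest.isAheadOf(nowOnReplica)) {
            onNewCheckpoint(shardKey, latest);
        }
    }
}

PATCH 028, whose description ends above, turns the previously hard-coded mmap extension list into an ordinary index-scoped setting. As a usage sketch (the setting keys and their defaults are those defined in the diff that follows; the custom extension list here is purely illustrative):

import org.opensearch.common.settings.Settings;

class HybridMmapSettingsSketch {
    // Settings a user might supply at index creation to override the default mmap extension list.
    static Settings customHybridStoreSettings() {
        return Settings.builder()
            .put("index.store.type", "hybridfs")
            .putList("index.store.hybrid.mmap.extensions", "nvd", "dvd", "tim", "tip", "cfs")
            .build();
    }
}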
Signed-off-by: Matt Weber --- .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexModule.java | 12 +++++ .../index/store/FsDirectoryFactory.java | 44 +++++-------------- .../index/store/FsDirectoryFactoryTests.java | 38 ++++++++++++++-- 4 files changed, 58 insertions(+), 37 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 1a31bec5935c8..e6a82d36a18a9 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -180,6 +180,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, + IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS, IndexModule.INDEX_RECOVERY_TYPE_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 46c2b91b4b99a..f8604caeab414 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -148,6 +148,18 @@ public final class IndexModule { Property.NodeScope ); + /** Which Lucene file extensions to load with the mmap directory when using hybridfs store. + * This is an expert setting. + * @see Lucene File Extensions. + */ + public static final Setting<List<String>> INDEX_STORE_HYBRID_MMAP_EXTENSIONS = Setting.listSetting( + "index.store.hybrid.mmap.extensions", + List.of("nvd", "dvd", "tim", "tip", "dim", "kdd", "kdi", "cfs", "doc"), + Function.identity(), + Property.IndexScope, + Property.NodeScope + ); + public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity"; // whether to use the query cache diff --git a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java index 57cc9f09ac37d..88c7e12632863 100644 --- a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java @@ -97,9 +97,10 @@ protected Directory newFSDirectory(Path location, LockFactory lockFactory, Index case HYBRIDFS: // Use Lucene defaults final FSDirectory primaryDirectory = FSDirectory.open(location, lockFactory); + final Set<String> mmapExtensions = new HashSet<>(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS)); if (primaryDirectory instanceof MMapDirectory) { MMapDirectory mMapDirectory = (MMapDirectory) primaryDirectory; - return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions)); + return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions), mmapExtensions); } else { return primaryDirectory; } @@ -142,10 +143,12 @@ public static boolean isHybridFs(Directory directory) { */ static final class HybridDirectory extends NIOFSDirectory { private final MMapDirectory delegate; + private final Set<String> mmapExtensions; - HybridDirectory(LockFactory lockFactory, MMapDirectory delegate) throws IOException { + HybridDirectory(LockFactory lockFactory, MMapDirectory delegate, Set<String> mmapExtensions) throws IOException { super(delegate.getDirectory(),
lockFactory); this.delegate = delegate; + this.mmapExtensions = mmapExtensions; } @Override @@ -164,43 +167,16 @@ public IndexInput openInput(String name, IOContext context) throws IOException { } } + boolean useDelegate(String name) { + final String extension = FileSwitchDirectory.getExtension(name); + return mmapExtensions.contains(extension); + } + @Override public void close() throws IOException { IOUtils.close(super::close, delegate); } - boolean useDelegate(String name) { - String extension = FileSwitchDirectory.getExtension(name); - switch (extension) { - // Norms, doc values and term dictionaries are typically performance-sensitive and hot in the page - // cache, so we use mmap, which provides better performance. - case "nvd": - case "dvd": - case "tim": - // We want to open the terms index and KD-tree index off-heap to save memory, but this only performs - // well if using mmap. - case "tip": - // dim files only apply up to lucene 8.x indices. It can be removed once we are in lucene 10 - case "dim": - case "kdd": - case "kdi": - // Compound files are tricky because they store all the information for the segment. Benchmarks - // suggested that not mapping them hurts performance. - case "cfs": - // MMapDirectory has special logic to read long[] arrays in little-endian order that helps speed - // up the decoding of postings. The same logic applies to positions (.pos) of offsets (.pay) but we - // are not mmaping them as queries that leverage positions are more costly and the decoding of postings - // tends to be less a bottleneck. - case "doc": - return true; - // Other files are either less performance-sensitive (e.g. stored field index, norms metadata) - // or are large and have a random access pattern and mmap leads to page cache trashing - // (e.g. stored fields and term vectors). 
- default: - return false; - } - } - MMapDirectory getDelegate() { return delegate; } diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index 70d4ae790f146..cf8d6677b4227 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -70,20 +70,52 @@ public void testPreload() throws IOException { try (Directory directory = newDirectory(build)) { assertTrue(FsDirectoryFactory.isHybridFs(directory)); FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - assertTrue(hybridDirectory.useDelegate("foo.dvd")); + // test default hybrid mmap extensions assertTrue(hybridDirectory.useDelegate("foo.nvd")); + assertTrue(hybridDirectory.useDelegate("foo.dvd")); assertTrue(hybridDirectory.useDelegate("foo.tim")); assertTrue(hybridDirectory.useDelegate("foo.tip")); - assertTrue(hybridDirectory.useDelegate("foo.cfs")); assertTrue(hybridDirectory.useDelegate("foo.dim")); assertTrue(hybridDirectory.useDelegate("foo.kdd")); assertTrue(hybridDirectory.useDelegate("foo.kdi")); - assertFalse(hybridDirectory.useDelegate("foo.bar")); + assertTrue(hybridDirectory.useDelegate("foo.cfs")); + assertTrue(hybridDirectory.useDelegate("foo.doc")); + assertFalse(hybridDirectory.useDelegate("foo.pos")); + assertFalse(hybridDirectory.useDelegate("foo.pay")); MMapDirectory delegate = hybridDirectory.getDelegate(); assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); FsDirectoryFactory.PreLoadMMapDirectory preLoadMMapDirectory = (FsDirectoryFactory.PreLoadMMapDirectory) delegate; assertTrue(preLoadMMapDirectory.useDelegate("foo.dvd")); assertTrue(preLoadMMapDirectory.useDelegate("foo.bar")); + assertFalse(preLoadMMapDirectory.useDelegate("foo.cfs")); + } + build = Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) + .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") + .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos", "pay") + .build(); + try (Directory directory = newDirectory(build)) { + assertTrue(FsDirectoryFactory.isHybridFs(directory)); + FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; + // test custom hybrid mmap extensions + assertTrue(hybridDirectory.useDelegate("foo.nvd")); + assertTrue(hybridDirectory.useDelegate("foo.dvd")); + assertTrue(hybridDirectory.useDelegate("foo.tim")); + assertFalse(hybridDirectory.useDelegate("foo.tip")); + assertFalse(hybridDirectory.useDelegate("foo.dim")); + assertFalse(hybridDirectory.useDelegate("foo.kdd")); + assertFalse(hybridDirectory.useDelegate("foo.kdi")); + assertFalse(hybridDirectory.useDelegate("foo.cfs")); + assertFalse(hybridDirectory.useDelegate("foo.doc")); + assertTrue(hybridDirectory.useDelegate("foo.pos")); + assertTrue(hybridDirectory.useDelegate("foo.pay")); + MMapDirectory delegate = hybridDirectory.getDelegate(); + assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); + FsDirectoryFactory.PreLoadMMapDirectory preLoadMMapDirectory = (FsDirectoryFactory.PreLoadMMapDirectory) delegate; + assertTrue(preLoadMMapDirectory.useDelegate("foo.dvd")); + assertFalse(preLoadMMapDirectory.useDelegate("foo.bar")); + 
assertTrue(preLoadMMapDirectory.useDelegate("foo.cfs")); + assertTrue(preLoadMMapDirectory.useDelegate("foo.nvd")); } } From ccf1d15618770acd54be9b3f37e731612a42e2d6 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 21 Jul 2022 22:57:32 -0700 Subject: [PATCH 029/104] Deprecate public methods and variables that contain 'master' terminology in 'client' directory (#3966) * Deprecate public methods and variables that contain 'master' terminology in 'client' directory Signed-off-by: Tianli Feng * Change 3 String type usages with 'master' to 'cluster manager' Signed-off-by: Tianli Feng --- .../client/ClusterRequestConverters.java | 8 ++--- .../client/IndicesRequestConverters.java | 30 ++++++++-------- .../org/opensearch/client/TimedRequest.java | 30 +++++++++++++--- .../indices/GetComponentTemplatesRequest.java | 34 ++++++++++++++++--- .../GetComposableIndexTemplateRequest.java | 34 ++++++++++++++++--- .../indices/GetIndexTemplatesRequest.java | 34 ++++++++++++++++--- .../client/IndicesRequestConvertersTests.java | 4 +-- .../client/RequestConvertersTests.java | 4 +-- .../opensearch/client/TimedRequestTests.java | 6 ++-- .../ClusterClientDocumentationIT.java | 8 ++--- .../IndicesClientDocumentationIT.java | 28 +++++++-------- .../indices/CloseIndexRequestTests.java | 6 ++-- .../main/java/org/opensearch/client/Node.java | 11 +++++- .../org/opensearch/client/NodeSelector.java | 6 ++-- .../sniff/OpenSearchNodesSnifferTests.java | 2 +- 15 files changed, 175 insertions(+), 70 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java index 5c3e403e4ce98..37a1ab8812845 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java @@ -105,7 +105,7 @@ static Request putComponentTemplate(PutComponentTemplateRequest putComponentTemp .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withClusterManagerTimeout(putComponentTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(putComponentTemplateRequest.clusterManagerNodeTimeout()); if (putComponentTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); } @@ -124,7 +124,7 @@ static Request getComponentTemplates(GetComponentTemplatesRequest getComponentTe final Request request = new Request(HttpGet.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getComponentTemplatesRequest.isLocal()); - params.withClusterManagerTimeout(getComponentTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(getComponentTemplatesRequest.getClusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -136,7 +136,7 @@ static Request componentTemplatesExist(ComponentTemplatesExistRequest componentT final Request request = new Request(HttpHead.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(componentTemplatesRequest.isLocal()); - params.withClusterManagerTimeout(componentTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(componentTemplatesRequest.getClusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -146,7 +146,7 @@ static 
Request deleteComponentTemplate(DeleteComponentTemplateRequest deleteComp String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_component_template").addPathPart(name).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withClusterManagerTimeout(deleteComponentTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(deleteComponentTemplateRequest.clusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index 9508faf14c898..3a5384f23b90e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -144,7 +144,7 @@ static Request closeIndex(CloseIndexRequest closeIndexRequest) { RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(closeIndexRequest.timeout()); - parameters.withClusterManagerTimeout(closeIndexRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(closeIndexRequest.clusterManagerNodeTimeout()); parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); request.addParameters(parameters.asMap()); return request; @@ -156,7 +156,7 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(createIndexRequest.timeout()); - parameters.withClusterManagerTimeout(createIndexRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(createIndexRequest.clusterManagerNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -179,7 +179,7 @@ static Request putMapping(PutMappingRequest putMappingRequest) throws IOExceptio RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(putMappingRequest.timeout()); - parameters.withClusterManagerTimeout(putMappingRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(putMappingRequest.clusterManagerNodeTimeout()); parameters.withIndicesOptions(putMappingRequest.indicesOptions()); request.addParameters(parameters.asMap()); request.setEntity(RequestConverters.createEntity(putMappingRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -192,7 +192,7 @@ static Request getMappings(GetMappingsRequest getMappingsRequest) { Request request = new Request(HttpGet.METHOD_NAME, RequestConverters.endpoint(indices, "_mapping")); RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withClusterManagerTimeout(getMappingsRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(getMappingsRequest.clusterManagerNodeTimeout()); parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); parameters.withLocal(getMappingsRequest.local()); request.addParameters(parameters.asMap()); @@ -332,7 +332,7 @@ private static Request resize(ResizeRequest resizeRequest, ResizeType type) thro RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(resizeRequest.timeout()); - 
params.withClusterManagerTimeout(resizeRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(resizeRequest.clusterManagerNodeTimeout()); params.withWaitForActiveShards(resizeRequest.getWaitForActiveShards()); request.addParameters(params.asMap()); request.setEntity(RequestConverters.createEntity(resizeRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); @@ -365,7 +365,7 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException { RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(rolloverRequest.timeout()); - params.withClusterManagerTimeout(rolloverRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(rolloverRequest.clusterManagerNodeTimeout()); params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards()); if (rolloverRequest.isDryRun()) { params.putParam("dry_run", Boolean.TRUE.toString()); @@ -402,7 +402,7 @@ static Request getIndex(GetIndexRequest getIndexRequest) { params.withLocal(getIndexRequest.local()); params.withIncludeDefaults(getIndexRequest.includeDefaults()); params.withHuman(getIndexRequest.humanReadable()); - params.withClusterManagerTimeout(getIndexRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(getIndexRequest.clusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -461,7 +461,7 @@ static Request putIndexTemplate(PutComposableIndexTemplateRequest putIndexTempla .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withClusterManagerTimeout(putIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(putIndexTemplateRequest.clusterManagerNodeTimeout()); if (putIndexTemplateRequest.create()) { params.putParam("create", Boolean.TRUE.toString()); } @@ -479,7 +479,7 @@ static Request simulateIndexTemplate(SimulateIndexTemplateRequest simulateIndexT .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withClusterManagerTimeout(simulateIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(simulateIndexTemplateRequest.clusterManagerNodeTimeout()); PutComposableIndexTemplateRequest putComposableIndexTemplateRequest = simulateIndexTemplateRequest.indexTemplateV2Request(); if (putComposableIndexTemplateRequest != null) { if (putComposableIndexTemplateRequest.create()) { @@ -529,7 +529,7 @@ static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) { final Request request = new Request(HttpGet.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexTemplatesRequest.isLocal()); - params.withClusterManagerTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(getIndexTemplatesRequest.getClusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -541,7 +541,7 @@ static Request getIndexTemplates(GetComposableIndexTemplateRequest getIndexTempl final Request request = new Request(HttpGet.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(getIndexTemplatesRequest.isLocal()); - params.withClusterManagerTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(getIndexTemplatesRequest.getClusterManagerNodeTimeout()); request.addParameters(params.asMap()); 
return request; } @@ -553,7 +553,7 @@ static Request templatesExist(IndexTemplatesExistRequest indexTemplatesExistRequ final Request request = new Request(HttpHead.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(indexTemplatesExistRequest.isLocal()); - params.withClusterManagerTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(indexTemplatesExistRequest.getClusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -565,7 +565,7 @@ static Request templatesExist(ComposableIndexTemplateExistRequest indexTemplates final Request request = new Request(HttpHead.METHOD_NAME, endpoint); final RequestConverters.Params params = new RequestConverters.Params(); params.withLocal(indexTemplatesExistRequest.isLocal()); - params.withClusterManagerTimeout(indexTemplatesExistRequest.getMasterNodeTimeout()); + params.withClusterManagerTimeout(indexTemplatesExistRequest.getClusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -597,7 +597,7 @@ static Request deleteIndexTemplate(DeleteComposableIndexTemplateRequest deleteIn String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_index_template").addPathPart(name).build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params params = new RequestConverters.Params(); - params.withClusterManagerTimeout(deleteIndexTemplateRequest.masterNodeTimeout()); + params.withClusterManagerTimeout(deleteIndexTemplateRequest.clusterManagerNodeTimeout()); request.addParameters(params.asMap()); return request; } @@ -610,7 +610,7 @@ static Request deleteAlias(DeleteAliasRequest deleteAliasRequest) { Request request = new Request(HttpDelete.METHOD_NAME, endpoint); RequestConverters.Params parameters = new RequestConverters.Params(); parameters.withTimeout(deleteAliasRequest.timeout()); - parameters.withClusterManagerTimeout(deleteAliasRequest.masterNodeTimeout()); + parameters.withClusterManagerTimeout(deleteAliasRequest.clusterManagerNodeTimeout()); request.addParameters(parameters.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index b5e7209a5212b..dad5b6a3679ec 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -44,10 +44,13 @@ public abstract class TimedRequest implements Validatable { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); - public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); + public static final TimeValue DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT} */ + @Deprecated + public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; private TimeValue timeout = DEFAULT_ACK_TIMEOUT; - private TimeValue clusterManagerTimeout = DEFAULT_MASTER_NODE_TIMEOUT; + private TimeValue clusterManagerTimeout = DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; /** * Sets the timeout to wait for the all the nodes to acknowledge @@ -61,10 +64,20 @@ public void setTimeout(TimeValue timeout) { * Sets the timeout to connect to the cluster-manager node * 
@param clusterManagerTimeout timeout as a {@link TimeValue}
      */
-    public void setMasterTimeout(TimeValue clusterManagerTimeout) {
+    public void setClusterManagerTimeout(TimeValue clusterManagerTimeout) {
         this.clusterManagerTimeout = clusterManagerTimeout;
     }
 
+    /**
+     * Sets the timeout to connect to the cluster-manager node
+     * @param clusterManagerTimeout timeout as a {@link TimeValue}
+     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerTimeout(TimeValue)}
+     */
+    @Deprecated
+    public void setMasterTimeout(TimeValue clusterManagerTimeout) {
+        setClusterManagerTimeout(clusterManagerTimeout);
+    }
+
     /**
      * Returns the request timeout
      */
@@ -75,7 +88,16 @@ public TimeValue timeout() {
     /**
      * Returns the timeout for the request to be completed on the cluster-manager node
      */
-    public TimeValue masterNodeTimeout() {
+    public TimeValue clusterManagerNodeTimeout() {
         return clusterManagerTimeout;
     }
+
+    /**
+     * Returns the timeout for the request to be completed on the cluster-manager node
+     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout()}
+     */
+    @Deprecated
+    public TimeValue masterNodeTimeout() {
+        return clusterManagerNodeTimeout();
+    }
 }
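For context, a minimal before/after sketch for callers of any TimedRequest subclass (the request class and timeout values below are illustrative only, not part of this patch):

    CreateIndexRequest request = new CreateIndexRequest("my-index"); // hypothetical index name
    // Deprecated spelling, kept for backwards compatibility; it now delegates to the new setter:
    request.setMasterTimeout(TimeValue.timeValueMinutes(1));
    // Preferred replacement introduced by this change:
    request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1));
    assert request.clusterManagerNodeTimeout().equals(TimeValue.timeValueMinutes(1));
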
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java
index ba9702fd6f2f2..6326e8edf763b 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java
@@ -44,7 +44,7 @@ public class GetComponentTemplatesRequest implements Validatable {
 
     private final String name;
 
-    private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT;
+    private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT;
     private boolean local = false;
 
     /**
@@ -67,17 +67,41 @@ public String name() {
     /**
      * @return the timeout for waiting for the cluster-manager node to respond
      */
-    public TimeValue getMasterNodeTimeout() {
+    public TimeValue getClusterManagerNodeTimeout() {
         return clusterManagerNodeTimeout;
     }
 
-    public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
+    /**
+     * @return the timeout for waiting for the cluster-manager node to respond
+     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeTimeout()}
+     */
+    @Deprecated
+    public TimeValue getMasterNodeTimeout() {
+        return getClusterManagerNodeTimeout();
+    }
+
+    public void setClusterManagerNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
         this.clusterManagerNodeTimeout = clusterManagerNodeTimeout;
     }
 
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} */
+    @Deprecated
+    public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
+        setClusterManagerNodeTimeout(clusterManagerNodeTimeout);
+    }
+
+    public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) {
+        final TimeValue timeValue = TimeValue.parseTimeValue(
+            clusterManagerNodeTimeout,
+            getClass().getSimpleName() + ".clusterManagerNodeTimeout"
+        );
+        setClusterManagerNodeTimeout(timeValue);
+    }
+
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} */
+    @Deprecated
     public void setMasterNodeTimeout(String clusterManagerNodeTimeout) {
-        final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout");
-        setMasterNodeTimeout(timeValue);
+        setClusterManagerNodeTimeout(clusterManagerNodeTimeout);
     }
 
     /**
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java
index cc8e820d5929f..73f6f15fc7a78 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java
@@ -44,7 +44,7 @@ public class GetComposableIndexTemplateRequest implements Validatable {
 
     private final String name;
 
-    private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT;
+    private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT;
     private boolean local = false;
 
     /**
@@ -67,17 +67,41 @@ public String name() {
     /**
      * @return the timeout for waiting for the cluster-manager node to respond
      */
-    public TimeValue getMasterNodeTimeout() {
+    public TimeValue getClusterManagerNodeTimeout() {
         return clusterManagerNodeTimeout;
     }
 
-    public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
+    /**
+     * @return the timeout for waiting for the cluster-manager node to respond
+     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeTimeout()}
+     */
+    @Deprecated
+    public TimeValue getMasterNodeTimeout() {
+        return getClusterManagerNodeTimeout();
+    }
+
+    public void setClusterManagerNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
         this.clusterManagerNodeTimeout = clusterManagerNodeTimeout;
     }
 
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} */
+    @Deprecated
+    public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
+        setClusterManagerNodeTimeout(clusterManagerNodeTimeout);
+    }
+
+    public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) {
+        final TimeValue timeValue = TimeValue.parseTimeValue(
+            clusterManagerNodeTimeout,
+            getClass().getSimpleName() + ".clusterManagerNodeTimeout"
+        );
+        setClusterManagerNodeTimeout(timeValue);
+    }
+
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} */
+    @Deprecated
    public void setMasterNodeTimeout(String clusterManagerNodeTimeout) {
-        final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout");
-        setMasterNodeTimeout(timeValue);
+        setClusterManagerNodeTimeout(clusterManagerNodeTimeout);
     }
 
     /**
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java
index f46af130cc9b0..c1bf001e525b3 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java
@@ -51,7 +51,7 @@ public class GetIndexTemplatesRequest implements Validatable {
 
     private final List<String> names;
 
-    private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT;
+    private TimeValue clusterManagerNodeTimeout = TimedRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT;
     private boolean local = false;
 
     /**
@@ -86,17 +86,41 @@ public List<String> names() {
     /**
      * @return the timeout for waiting for the cluster-manager node to respond
      */
-    public TimeValue getMasterNodeTimeout() {
+    public TimeValue getClusterManagerNodeTimeout() {
         return clusterManagerNodeTimeout;
     }
 
-    public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
+    /**
+     * @return the timeout for waiting for the cluster-manager node to respond
+     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeTimeout()}
+     */
+    @Deprecated
+    public TimeValue getMasterNodeTimeout() {
+        return getClusterManagerNodeTimeout();
+    }
+
+    public void setClusterManagerNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
         this.clusterManagerNodeTimeout = clusterManagerNodeTimeout;
     }
 
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} */
+    @Deprecated
+    public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) {
+        setClusterManagerNodeTimeout(clusterManagerNodeTimeout);
+    }
+
+    public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) {
+        final TimeValue timeValue = TimeValue.parseTimeValue(
+            clusterManagerNodeTimeout,
+            getClass().getSimpleName() + ".clusterManagerNodeTimeout"
+        );
+        setClusterManagerNodeTimeout(timeValue);
+    }
+
+    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} */
+    @Deprecated
     public void setMasterNodeTimeout(String clusterManagerNodeTimeout) {
-        final TimeValue timeValue = TimeValue.parseTimeValue(clusterManagerNodeTimeout, getClass().getSimpleName() + ".masterNodeTimeout");
-        setMasterNodeTimeout(timeValue);
+        setClusterManagerNodeTimeout(clusterManagerNodeTimeout);
     }
 
     /**
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
index bf6d6c922fdd7..fdb5f2843b44d 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
@@ -917,7 +917,7 @@ public void testGetTemplateRequest() throws Exception {
         List<String> names = OpenSearchTestCase.randomSubsetOf(1, encodes.keySet());
         GetIndexTemplatesRequest getTemplatesRequest = new GetIndexTemplatesRequest(names);
         Map<String, String> expectedParams = new HashMap<>();
-        RequestConvertersTests.setRandomClusterManagerTimeout(getTemplatesRequest::setMasterNodeTimeout, expectedParams);
+        RequestConvertersTests.setRandomClusterManagerTimeout(getTemplatesRequest::setClusterManagerNodeTimeout, expectedParams);
         RequestConvertersTests.setRandomLocal(getTemplatesRequest::setLocal, expectedParams);
 
         Request request = IndicesRequestConverters.getTemplates(getTemplatesRequest);
@@ -946,7 +946,7 @@ public void testTemplatesExistRequest() {
         );
         final Map<String, String> expectedParams = new HashMap<>();
         final IndexTemplatesExistRequest indexTemplatesExistRequest = new IndexTemplatesExistRequest(names);
-        RequestConvertersTests.setRandomClusterManagerTimeout(indexTemplatesExistRequest::setMasterNodeTimeout, expectedParams);
+
RequestConvertersTests.setRandomClusterManagerTimeout(indexTemplatesExistRequest::setClusterManagerNodeTimeout, expectedParams); RequestConvertersTests.setRandomLocal(indexTemplatesExistRequest::setLocal, expectedParams); assertThat(indexTemplatesExistRequest.names(), equalTo(names)); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index a93eef7f55d81..97c0f2f475826 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -2111,7 +2111,7 @@ static void setRandomClusterManagerTimeout(ClusterManagerNodeRequest request, static void setRandomClusterManagerTimeout(TimedRequest request, Map expectedParams) { setRandomClusterManagerTimeout( - s -> request.setMasterTimeout(TimeValue.parseTimeValue(s, request.getClass().getName() + ".masterNodeTimeout")), + s -> request.setClusterManagerTimeout(TimeValue.parseTimeValue(s, request.getClass().getName() + ".clusterManagerNodeTimeout")), expectedParams ); } @@ -2128,7 +2128,7 @@ static void setRandomClusterManagerTimeout(Consumer setter, Map setter, TimeValue defaultTimeout, Map expectedParams) { if (randomBoolean()) { - TimeValue clusterManagerTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_master_timeout"); + TimeValue clusterManagerTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_cluster_manager_timeout"); setter.accept(clusterManagerTimeout); expectedParams.put("cluster_manager_timeout", clusterManagerTimeout.getStringRep()); } else { diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java index 659238debccad..a630f4f5c1489 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TimedRequestTests.java @@ -41,7 +41,7 @@ public void testDefaults() { TimedRequest timedRequest = new TimedRequest() { }; assertEquals(timedRequest.timeout(), TimedRequest.DEFAULT_ACK_TIMEOUT); - assertEquals(timedRequest.masterNodeTimeout(), TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT); + assertEquals(timedRequest.clusterManagerNodeTimeout(), TimedRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT); } public void testNonDefaults() { @@ -50,8 +50,8 @@ public void testNonDefaults() { TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); TimeValue clusterManagerTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); timedRequest.setTimeout(timeout); - timedRequest.setMasterTimeout(clusterManagerTimeout); + timedRequest.setClusterManagerTimeout(clusterManagerTimeout); assertEquals(timedRequest.timeout(), timeout); - assertEquals(timedRequest.masterNodeTimeout(), clusterManagerTimeout); + assertEquals(timedRequest.clusterManagerNodeTimeout(), clusterManagerTimeout); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java index baebd12e22a99..a3803701be718 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java +++ 
b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java @@ -515,8 +515,8 @@ public void testGetComponentTemplates() throws Exception { // end::get-component-templates-request // tag::get-component-templates-request-masterTimeout - request.setMasterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> - request.setMasterNodeTimeout("1m"); // <2> + request.setClusterManagerNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerNodeTimeout("1m"); // <2> // end::get-component-templates-request-masterTimeout // tag::get-component-templates-execute @@ -603,7 +603,7 @@ public void testPutComponentTemplate() throws Exception { // end::put-component-template-request-create // tag::put-component-template-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::put-component-template-request-masterTimeout request.create(false); // make test happy @@ -670,7 +670,7 @@ public void testDeleteComponentTemplate() throws Exception { // end::delete-component-template-request // tag::delete-component-template-request-masterTimeout - deleteRequest.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + deleteRequest.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::delete-component-template-request-masterTimeout // tag::delete-component-template-execute diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index 85c5d622f6f60..dd25463d02977 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -381,7 +381,7 @@ public void testCreateIndex() throws IOException { request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> // end::create-index-request-timeout // tag::create-index-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::create-index-request-masterTimeout // tag::create-index-request-waitForActiveShards request.waitForActiveShards(ActiveShardCount.from(2)); // <1> @@ -526,7 +526,7 @@ public void testPutMapping() throws IOException { // end::put-mapping-request-timeout // tag::put-mapping-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::put-mapping-request-masterTimeout // tag::put-mapping-execute @@ -597,7 +597,7 @@ public void testGetMapping() throws IOException { // end::get-mappings-request // tag::get-mappings-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::get-mappings-request-masterTimeout // tag::get-mappings-request-indicesOptions @@ -1374,7 +1374,7 @@ public void testCloseIndex() throws Exception { request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> // end::close-index-request-timeout // tag::close-index-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // 
end::close-index-request-masterTimeout // tag::close-index-request-indicesOptions @@ -1815,7 +1815,7 @@ public void testRolloverIndex() throws Exception { request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> // end::rollover-index-request-timeout // tag::rollover-index-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::rollover-index-request-masterTimeout // tag::rollover-index-request-dryRun request.dryRun(true); // <1> @@ -2231,8 +2231,8 @@ public void testGetTemplates() throws Exception { // end::get-templates-request // tag::get-templates-request-masterTimeout - request.setMasterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> - request.setMasterNodeTimeout("1m"); // <2> + request.setClusterManagerNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerNodeTimeout("1m"); // <2> // end::get-templates-request-masterTimeout // tag::get-templates-execute @@ -2291,8 +2291,8 @@ public void testGetIndexTemplatesV2() throws Exception { // end::get-index-templates-v2-request // tag::get-index-templates-v2-request-masterTimeout - request.setMasterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> - request.setMasterNodeTimeout("1m"); // <2> + request.setClusterManagerNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerNodeTimeout("1m"); // <2> // end::get-index-templates-v2-request-masterTimeout // tag::get-index-templates-v2-execute @@ -2450,7 +2450,7 @@ public void testPutIndexTemplateV2() throws Exception { // end::put-index-template-v2-request-create // tag::put-index-template-v2-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::put-index-template-v2-request-masterTimeout request.create(false); // make test happy @@ -2511,7 +2511,7 @@ public void testDeleteIndexTemplateV2() throws Exception { // end::delete-index-template-v2-request // tag::delete-index-template-v2-request-masterTimeout - deleteRequest.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + deleteRequest.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::delete-index-template-v2-request-masterTimeout // tag::delete-index-template-v2-execute @@ -2644,8 +2644,8 @@ public void testTemplatesExist() throws Exception { // tag::templates-exist-request-optionals request.setLocal(true); // <1> - request.setMasterNodeTimeout(TimeValue.timeValueMinutes(1)); // <2> - request.setMasterNodeTimeout("1m"); // <3> + request.setClusterManagerNodeTimeout(TimeValue.timeValueMinutes(1)); // <2> + request.setClusterManagerNodeTimeout("1m"); // <3> // end::templates-exist-request-optionals // tag::templates-exist-execute @@ -2973,7 +2973,7 @@ public void testDeleteAlias() throws Exception { request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> // end::delete-alias-request-timeout // tag::delete-alias-request-masterTimeout - request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.setClusterManagerTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::delete-alias-request-masterTimeout // tag::delete-alias-execute diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java index 5bfb0abab9f37..15ae0bbca910a 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexRequestTests.java @@ -75,15 +75,15 @@ public void testWaitForActiveShards() { public void testTimeout() { final CloseIndexRequest request = new CloseIndexRequest("index"); assertEquals(request.timeout(), TimedRequest.DEFAULT_ACK_TIMEOUT); - assertEquals(request.masterNodeTimeout(), TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT); + assertEquals(request.clusterManagerNodeTimeout(), TimedRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); request.setTimeout(timeout); final TimeValue clusterManagerTimeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); - request.setMasterTimeout(clusterManagerTimeout); + request.setClusterManagerTimeout(clusterManagerTimeout); assertEquals(request.timeout(), timeout); - assertEquals(request.masterNodeTimeout(), clusterManagerTimeout); + assertEquals(request.clusterManagerNodeTimeout(), clusterManagerTimeout); } } diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index 952823cf29d6c..c02ac6c68718f 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -212,10 +212,19 @@ public Roles(final Set roles) { /** * Returns whether or not the node could be elected cluster-manager. */ - public boolean isMasterEligible() { + public boolean isClusterManagerEligible() { return roles.contains("master") || roles.contains("cluster_manager"); } + /** + * Returns whether or not the node could be elected cluster-manager. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerEligible()} + */ + @Deprecated + public boolean isMasterEligible() { + return isClusterManagerEligible(); + } + /** * Returns whether or not the node stores data. 
*/ diff --git a/client/rest/src/main/java/org/opensearch/client/NodeSelector.java b/client/rest/src/main/java/org/opensearch/client/NodeSelector.java index 1d1c09f33fef7..a54c6b1514717 100644 --- a/client/rest/src/main/java/org/opensearch/client/NodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/NodeSelector.java @@ -89,7 +89,9 @@ public void select(Iterable nodes) { for (Iterator itr = nodes.iterator(); itr.hasNext();) { Node node = itr.next(); if (node.getRoles() == null) continue; - if (node.getRoles().isMasterEligible() && false == node.getRoles().isData() && false == node.getRoles().isIngest()) { + if (node.getRoles().isClusterManagerEligible() + && false == node.getRoles().isData() + && false == node.getRoles().isIngest()) { itr.remove(); } } @@ -97,7 +99,7 @@ public void select(Iterable nodes) { @Override public String toString() { - return "SKIP_DEDICATED_MASTERS"; + return "SKIP_DEDICATED_CLUSTER_MANAGERS"; } }; } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 8cc6f5f006861..1d06e9353726d 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -287,7 +287,7 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc Collections.shuffle(roles, getRandom()); generator.writeArrayFieldStart("roles"); for (String role : roles) { - if ("cluster_manager".equals(role) && node.getRoles().isMasterEligible()) { + if ("cluster_manager".equals(role) && node.getRoles().isClusterManagerEligible()) { generator.writeString("cluster_manager"); } if ("data".equals(role) && node.getRoles().isData()) { From 197909c71f91f1e26da7bc6c7218b53fbb73cdae Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 22 Jul 2022 09:31:17 -0700 Subject: [PATCH 030/104] [Segment Replication] Wire up segment replication with peer recovery and add ITs. (#3743) * Add null check when computing max segment version. With segment replication enabled it is possible Lucene does not set the SegmentInfos min segment version, leaving the default value as null. Signed-off-by: Marc Handalian * Update peer recovery to set the translogUUID of replicas to the UUID generated on the primary. This change updates the UUID when the translog is created to the value stored in the passed segment userdata. This is to ensure during failover scenarios that the replica can be promoted and not have a uuid mismatch with the value stored in user data. Signed-off-by: Marc Handalian * Wire up Segment Replication under the feature flag. This PR wires up segment replication and adds some initial integration tests. Signed-off-by: Marc Handalian * Add test to ensure replicas use primary translog uuid with segrep. Signed-off-by: Marc Handalian * Update SegmentReplicationIT to assert previous commit points are valid and SegmentInfos can be built. Fix nitpicks in PR feedback. Signed-off-by: Marc Handalian * Fix test with Assert.fail to include a message. 
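For reviewers, a minimal sketch of the per-index settings the new integration tests exercise (the variable name and shard/replica counts are illustrative; the cluster must also be started with the replication type feature flag, FeatureFlags.REPLICATION_TYPE, enabled as a system property):

    // Mirrors SegmentReplicationIT.indexSettings(): opt an index into segment replication.
    Settings segRepIndexSettings = Settings.builder()
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
        .build();
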
Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationIT.java | 306 ++++++++++++++++++ .../opensearch/index/shard/IndexShard.java | 16 +- .../org/opensearch/index/store/Store.java | 7 +- .../cluster/IndicesClusterStateService.java | 20 +- .../indices/recovery/RecoveryTarget.java | 33 +- .../OngoingSegmentReplications.java | 6 +- .../SegmentReplicationSourceFactory.java | 8 +- .../SegmentReplicationSourceHandler.java | 8 + .../replication/SegmentReplicationTarget.java | 7 +- .../SegmentReplicationTargetService.java | 21 ++ .../checkpoint/ReplicationCheckpoint.java | 13 + .../replication/common/ReplicationTarget.java | 4 +- .../main/java/org/opensearch/node/Node.java | 2 + ...ClusterStateServiceRandomUpdatesTests.java | 2 + .../indices/recovery/RecoveryTests.java | 12 + .../OngoingSegmentReplicationsTests.java | 46 ++- .../snapshots/SnapshotResiliencyTests.java | 8 + 17 files changed, 484 insertions(+), 35 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java new file mode 100644 index 0000000000000..2c91eadafbee8 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -0,0 +1,306 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.apache.lucene.index.SegmentInfos; +import org.junit.BeforeClass; +import org.opensearch.action.admin.indices.segments.IndexShardSegments; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.segments.ShardSegments; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.Index; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.Segment; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationIT extends OpenSearchIntegTestCase { + + private static final String INDEX_NAME = "test-idx-1"; + private static final int SHARD_COUNT = 1; + private static final int 
REPLICA_COUNT = 1; + + @BeforeClass + public static void assumeFeatureFlag() { + assumeTrue("Segment replication Feature flag is enabled", Boolean.parseBoolean(System.getProperty(FeatureFlags.REPLICATION_TYPE))); + } + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { + final String nodeA = internalCluster().startNode(); + final String nodeB = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(0, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + ensureGreen(INDEX_NAME); + assertSegmentStats(REPLICA_COUNT); + } + } + + public void testReplicationAfterForceMerge() throws Exception { + final String nodeA = internalCluster().startNode(); + final String nodeB = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int expectedHitCount = initialDocCount + additionalDocCount; + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + + flush(INDEX_NAME); + waitForReplicaUpdate(); + // wait a short amount of time to give replication a chance to complete. + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + // Index a second set of docs so we can merge into one segment. + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + + // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. 
+ client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + ensureGreen(INDEX_NAME); + assertSegmentStats(REPLICA_COUNT); + } + } + + public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { + final String primaryNode = internalCluster().startNode(); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); + ensureGreen(INDEX_NAME); + + // Index a doc to create the first set of segments. _s1.si + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + // Flush segments to disk and create a new commit point (Primary: segments_3, _s1.si) + flushAndRefresh(INDEX_NAME); + assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 1); + + // Index to create another segment + client().prepareIndex(INDEX_NAME).setId("2").setSource("foo", "bar").get(); + + // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + refresh(INDEX_NAME); + + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + final String replicaNode = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + client().prepareIndex(INDEX_NAME).setId("3").setSource("foo", "bar").get(); + + waitForReplicaUpdate(); + assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + + final Index index = resolveIndex(INDEX_NAME); + IndexShard primaryShard = getIndexShard(index, primaryNode); + IndexShard replicaShard = getIndexShard(index, replicaNode); + assertEquals( + primaryShard.translogStats().estimatedNumberOfOperations(), + replicaShard.translogStats().estimatedNumberOfOperations() + ); + assertSegmentStats(REPLICA_COUNT); + } + + private void assertSegmentStats(int numberOfReplicas) throws IOException { + final IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().segments(new IndicesSegmentsRequest()).actionGet(); + + List segmentsByIndex = getShardSegments(indicesSegmentResponse); + + // There will be an entry in the list for each index. + for (ShardSegments[] replicationGroupSegments : segmentsByIndex) { + + // Separate Primary & replica shards ShardSegments. 
+ final Map> segmentListMap = segmentsByShardType(replicationGroupSegments); + final List primaryShardSegmentsList = segmentListMap.get(true); + final List replicaShardSegments = segmentListMap.get(false); + + assertEquals("There should only be one primary in the replicationGroup", primaryShardSegmentsList.size(), 1); + final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get(); + final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); + + assertEquals( + "There should be a ShardSegment entry for each replica in the replicationGroup", + numberOfReplicas, + replicaShardSegments.size() + ); + + for (ShardSegments shardSegment : replicaShardSegments) { + final Map latestReplicaSegments = getLatestSegments(shardSegment); + for (Segment replicaSegment : latestReplicaSegments.values()) { + final Segment primarySegment = latestPrimarySegments.get(replicaSegment.getName()); + assertEquals(replicaSegment.getGeneration(), primarySegment.getGeneration()); + assertEquals(replicaSegment.getNumDocs(), primarySegment.getNumDocs()); + assertEquals(replicaSegment.getDeletedDocs(), primarySegment.getDeletedDocs()); + assertEquals(replicaSegment.getSize(), primarySegment.getSize()); + } + + // Fetch the IndexShard for this replica and try and build its SegmentInfos from the previous commit point. + // This ensures the previous commit point is not wiped. + final ShardRouting replicaShardRouting = shardSegment.getShardRouting(); + ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); + final DiscoveryNode replicaNode = state.nodes().resolveNode(replicaShardRouting.currentNodeId()); + final Index index = resolveIndex(INDEX_NAME); + IndexShard indexShard = getIndexShard(index, replicaNode.getName()); + final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(indexShard.store().directory()); + // calls to readCommit will fail if a valid commit point and all its segments are not in the store. + SegmentInfos.readCommit(indexShard.store().directory(), lastCommitSegmentsFileName); + } + } + } + + /** + * Waits until the replica is caught up to the latest primary segments gen. + * @throws Exception + */ + private void waitForReplicaUpdate() throws Exception { + // wait until the replica has the latest segment generation. 
+        assertBusy(() -> {
+            final IndicesSegmentResponse indicesSegmentResponse = client().admin()
+                .indices()
+                .segments(new IndicesSegmentsRequest())
+                .actionGet();
+            List<ShardSegments[]> segmentsByIndex = getShardSegments(indicesSegmentResponse);
+            for (ShardSegments[] replicationGroupSegments : segmentsByIndex) {
+                final Map<Boolean, List<ShardSegments>> segmentListMap = segmentsByShardType(replicationGroupSegments);
+                final List<ShardSegments> primaryShardSegmentsList = segmentListMap.get(true);
+                final List<ShardSegments> replicaShardSegments = segmentListMap.get(false);
+
+                final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get();
+                final Map<String, Segment> latestPrimarySegments = getLatestSegments(primaryShardSegments);
+                final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get();
+                for (ShardSegments shardSegments : replicaShardSegments) {
+                    final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments()
+                        .stream()
+                        .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen);
+                    assertTrue(isReplicaCaughtUpToPrimary);
+                }
+            }
+        });
+    }
+
+    private IndexShard getIndexShard(Index index, String node) {
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
+        IndexService indexService = indicesService.indexServiceSafe(index);
+        final Optional<Integer> shardId = indexService.shardIds().stream().findFirst();
+        return indexService.getShard(shardId.get());
+    }
+
+    private List<ShardSegments[]> getShardSegments(IndicesSegmentResponse indicesSegmentResponse) {
+        return indicesSegmentResponse.getIndices()
+            .values()
+            .stream() // get list of IndexSegments
+            .flatMap(is -> is.getShards().values().stream()) // Map to shard replication group
+            .map(IndexShardSegments::getShards) // get list of segments across replication group
+            .collect(Collectors.toList());
+    }
+
+    private Map<String, Segment> getLatestSegments(ShardSegments segments) {
+        final Long latestPrimaryGen = segments.getSegments().stream().map(Segment::getGeneration).max(Long::compare).get();
+        return segments.getSegments()
+            .stream()
+            .filter(s -> s.getGeneration() == latestPrimaryGen)
+            .collect(Collectors.toMap(Segment::getName, Function.identity()));
+    }
+
+    private Map<Boolean, List<ShardSegments>> segmentsByShardType(ShardSegments[] replicationGroupSegments) {
+        return Arrays.stream(replicationGroupSegments).collect(Collectors.groupingBy(s -> s.getShardRouting().primary()));
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index fbe81b9320de5..9694f3dd37f80 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -1396,9 +1396,13 @@ public GatedCloseable<IndexCommit> acquireSafeIndexCommit() throws EngineExcepti
     }
 
     /**
-     * Returns the lastest Replication Checkpoint that shard received
+     * Returns the latest Replication Checkpoint that shard received. Shards will return an EMPTY checkpoint before
+     * the engine is opened.
*/ public ReplicationCheckpoint getLatestReplicationCheckpoint() { + if (getEngineOrNull() == null) { + return ReplicationCheckpoint.empty(shardId); + } try (final GatedCloseable snapshot = getSegmentInfosSnapshot()) { return Optional.ofNullable(snapshot.get()) .map( @@ -1410,15 +1414,7 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { segmentInfos.getVersion() ) ) - .orElse( - new ReplicationCheckpoint( - shardId, - getOperationPrimaryTerm(), - SequenceNumbers.NO_OPS_PERFORMED, - getProcessedLocalCheckpoint(), - SequenceNumbers.NO_OPS_PERFORMED - ) - ); + .orElse(ReplicationCheckpoint.empty(shardId)); } catch (IOException ex) { throw new OpenSearchException("Error Closing SegmentInfos Snapshot", ex); } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 2309004c0777d..6828ab7d91b2c 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -1003,7 +1003,12 @@ static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory director // version is written since 3.1+: we should have already hit IndexFormatTooOld. throw new IllegalArgumentException("expected valid version value: " + info.info.toString()); } - if (version.onOrAfter(maxVersion)) { + // With segment replication enabled, we compute metadata snapshots from the latest in memory infos. + // In this case we will have SegmentInfos objects fetched from the primary's reader + // where the minSegmentLuceneVersion can be null even though there are segments. + // This is because the SegmentInfos object is not read from a commit/IndexInput, which sets + // minSegmentLuceneVersion. + if (maxVersion == null || version.onOrAfter(maxVersion)) { maxVersion = version; } for (String file : info.files()) { diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 33d3bc2ba07b0..8884ef2cddd0a 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -56,6 +56,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.env.ShardLockObtainFailedException; @@ -80,6 +81,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.RepositoriesService; @@ -90,6 +92,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -134,7 +137,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final FailedShardHandler failedShardHandler = new FailedShardHandler(); private 
final boolean sendRefreshMapping;
-    private final List<IndexEventListener> buildInIndexListener;
+    private final List<IndexEventListener> builtInIndexListener;
     private final PrimaryReplicaSyncer primaryReplicaSyncer;
     private final Consumer<ShardId> globalCheckpointSyncer;
     private final RetentionLeaseSyncer retentionLeaseSyncer;
@@ -148,6 +151,7 @@ public IndicesClusterStateService(
         final ClusterService clusterService,
         final ThreadPool threadPool,
         final PeerRecoveryTargetService recoveryTargetService,
+        final SegmentReplicationTargetService segmentReplicationTargetService,
         final ShardStateAction shardStateAction,
         final NodeMappingRefreshAction nodeMappingRefreshAction,
         final RepositoriesService repositoriesService,
@@ -165,6 +169,7 @@ public IndicesClusterStateService(
             clusterService,
             threadPool,
             checkpointPublisher,
+            segmentReplicationTargetService,
             recoveryTargetService,
             shardStateAction,
             nodeMappingRefreshAction,
@@ -185,6 +190,7 @@ public IndicesClusterStateService(
         final ClusterService clusterService,
         final ThreadPool threadPool,
         final SegmentReplicationCheckpointPublisher checkpointPublisher,
+        final SegmentReplicationTargetService segmentReplicationTargetService,
         final PeerRecoveryTargetService recoveryTargetService,
         final ShardStateAction shardStateAction,
         final NodeMappingRefreshAction nodeMappingRefreshAction,
@@ -198,7 +204,15 @@ public IndicesClusterStateService(
     ) {
         this.settings = settings;
         this.checkpointPublisher = checkpointPublisher;
-        this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, snapshotShardsService);
+
+        final List<IndexEventListener> indexEventListeners = new ArrayList<>(
+            Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, snapshotShardsService)
+        );
+        // if segrep feature flag is not enabled, don't wire the target service as an IndexEventListener.
+        if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) {
+            indexEventListeners.add(segmentReplicationTargetService);
+        }
+        this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners);
         this.indicesService = indicesService;
         this.clusterService = clusterService;
         this.threadPool = threadPool;
@@ -514,7 +528,7 @@ private void createIndices(final ClusterState state) {
             AllocatedIndex<? extends Shard> indexService = null;
             try {
-                indexService = indicesService.createIndex(indexMetadata, buildInIndexListener, true);
+                indexService = indicesService.createIndex(indexMetadata, builtInIndexListener, true);
                 if (indexService.updateMapping(null, indexMetadata) && sendRefreshMapping) {
                     nodeMappingRefreshAction.nodeMappingRefresh(
                         state.nodes().getClusterManagerNode(),
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java
index 426409f7a5b65..652f3c9a55f53 100644
--- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java
@@ -62,10 +62,13 @@ import org.opensearch.indices.replication.common.ReplicationCollection;
 
 import java.io.IOException;
+import java.nio.channels.FileChannel;
 import java.nio.file.Path;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 
+import static org.opensearch.index.translog.Translog.TRANSLOG_UUID_KEY;
+
 /**
  * Represents a recovery where the current node is the target node of the recovery. To track recoveries in a central place, instances of
  * this class are created through {@link ReplicationCollection}.
@@ -398,13 +401,29 @@ public void cleanFiles( store.incRef(); try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetadata); - final String translogUUID = Translog.createEmptyTranslog( - indexShard.shardPath().resolveTranslog(), - globalCheckpoint, - shardId(), - indexShard.getPendingPrimaryTerm() - ); - store.associateIndexWithNewTranslog(translogUUID); + + // If Segment Replication is enabled, we need to reuse the primary's translog UUID already stored in the index. + // With Segrep, replicas should never create their own commit points. This ensures the index and xlog share the same + // UUID without the extra step to associate the index with a new xlog. + if (indexShard.indexSettings().isSegRepEnabled()) { + final String translogUUID = store.getMetadata().getCommitUserData().get(TRANSLOG_UUID_KEY); + Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), + shardId(), + globalCheckpoint, + indexShard.getPendingPrimaryTerm(), + translogUUID, + FileChannel::open + ); + } else { + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), + globalCheckpoint, + shardId(), + indexShard.getPendingPrimaryTerm() + ); + store.associateIndexWithNewTranslog(translogUUID); + } if (indexShard.getRetentionLeases().leases().isEmpty()) { // if empty, may be a fresh IndexShard, so write an empty leases file to disk diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index 6302d364fc6d1..a9b032c98b70f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -113,7 +113,11 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener { + final String targetAllocationId = request.getTargetAllocationId(); + RunUnderPrimaryPermit.run( + () -> shard.markAllocationIdAsInSync(targetAllocationId, request.getCheckpoint().getSeqNo()), + shard.shardId() + " marking " + targetAllocationId + " as in sync", + shard, + cancellableThreads, + logger + ); try { future.onResponse(new GetSegmentFilesResponse(List.of(storeFileMetadata))); } finally { diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index fb68e59f3b2ef..516cfa91a787b 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -181,11 +181,8 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener listener) { diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index f9b40d14b0d53..f699f0edba842 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -53,6 +53,27 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final Map latestReceivedCheckpoint = new HashMap<>(); + // Empty Implementation, only required while Segment Replication is under feature flag. 
+ public static final SegmentReplicationTargetService NO_OP = new SegmentReplicationTargetService() { + @Override + public void beforeIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + // NoOp; + } + + @Override + public synchronized void onNewCheckpoint(ReplicationCheckpoint receivedCheckpoint, IndexShard replicaShard) { + // noOp; + } + }; + + // Used only for empty implementation. + private SegmentReplicationTargetService() { + threadPool = null; + recoverySettings = null; + onGoingReplications = null; + sourceFactory = null; + } + /** * The internal actions * diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index f84a65206190b..abcef1bd91944 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -12,6 +12,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import java.io.IOException; @@ -30,6 +31,18 @@ public class ReplicationCheckpoint implements Writeable { private final long seqNo; private final long segmentInfosVersion; + public static ReplicationCheckpoint empty(ShardId shardId) { + return new ReplicationCheckpoint(shardId); + } + + private ReplicationCheckpoint(ShardId shardId) { + this.shardId = shardId; + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + segmentsGen = SequenceNumbers.NO_OPS_PERFORMED; + seqNo = SequenceNumbers.NO_OPS_PERFORMED; + segmentInfosVersion = SequenceNumbers.NO_OPS_PERFORMED; + } + public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long seqNo, long segmentInfosVersion) { this.shardId = shardId; this.primaryTerm = primaryTerm; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 27e23ceafb15e..501ff46eeb2ff 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -49,7 +49,6 @@ public abstract class ReplicationTarget extends AbstractRefCounted { private final long id; protected final AtomicBoolean finished = new AtomicBoolean(); - private final ShardId shardId; protected final IndexShard indexShard; protected final Store store; protected final ReplicationListener listener; @@ -89,7 +88,6 @@ public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIn this.stateIndex = stateIndex; this.indexShard = indexShard; this.store = indexShard.store(); - this.shardId = indexShard.shardId(); // make sure the store is not released until we are done. 
this.cancellableThreads = new CancellableThreads(); store.incRef(); @@ -131,7 +129,7 @@ public Store store() { } public ShardId shardId() { - return shardId; + return indexShard.shardId(); } /** diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index eb2604af0147b..6cc8128cffd52 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -953,6 +953,8 @@ protected Node( ); b.bind(SegmentReplicationSourceService.class) .toInstance(new SegmentReplicationSourceService(indicesService, transportService, recoverySettings)); + } else { + b.bind(SegmentReplicationTargetService.class).toInstance(SegmentReplicationTargetService.NO_OP); } } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 903bdccef5587..939e8b2d4e782 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.TestThreadPool; @@ -566,6 +567,7 @@ private IndicesClusterStateService createIndicesClusterStateService( clusterService, threadPool, SegmentReplicationCheckpointPublisher.EMPTY, + SegmentReplicationTargetService.NO_OP, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 5224a54a35e96..3ea74dbf38919 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -71,6 +71,7 @@ import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; import java.util.HashMap; @@ -103,6 +104,17 @@ public void testTranslogHistoryTransferred() throws Exception { } } + public void testWithSegmentReplication_ReplicaUsesPrimaryTranslogUUID() throws Exception { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + try (ReplicationGroup shards = createGroup(2, settings)) { + shards.startAll(); + final String expectedUUID = getTranslog(shards.getPrimary()).getTranslogUUID(); + assertTrue( + shards.getReplicas().stream().allMatch(indexShard -> getTranslog(indexShard).getTranslogUUID().equals(expectedUUID)) + ); + } + } + public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); diff --git 
a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 260f6a13b5010..d42e75871a45a 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -37,6 +37,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; public class OngoingSegmentReplicationsTests extends IndexShardTestCase { @@ -126,7 +127,7 @@ public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { @Override public void onFailure(Exception e) { logger.error("Unexpected failure", e); - Assert.fail(); + Assert.fail("Unexpected failure from startSegmentCopy listener: " + e); } }); } @@ -228,4 +229,47 @@ public void testShardAlreadyReplicatingToNode() throws IOException { replications.prepareForReplication(request, segmentSegmentFileChunkWriter); assertThrows(OpenSearchException.class, () -> { replications.prepareForReplication(request, segmentSegmentFileChunkWriter); }); } + + public void testStartReplicationWithNoFilesToFetch() throws IOException { + // create a replications object and request a checkpoint. + OngoingSegmentReplications replications = spy(new OngoingSegmentReplications(mockIndicesService, recoverySettings)); + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + testCheckpoint + ); + // mock the FileChunkWriter so we can assert it is never called. + final FileChunkWriter segmentSegmentFileChunkWriter = mock(FileChunkWriter.class); + // Prepare for replication step - and ensure copyState is added to cache. + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + assertTrue(replications.isInCopyStateMap(request.getCheckpoint())); + assertEquals(1, replications.size()); + assertEquals(1, copyState.refCount()); + + getSegmentFilesRequest = new GetSegmentFilesRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + Collections.emptyList(), + testCheckpoint + ); + + // invoke startSegmentCopy and assert our fileChunkWriter is never invoked.
+ replications.startSegmentCopy(getSegmentFilesRequest, new ActionListener<>() { + @Override + public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { + assertEquals(Collections.emptyList(), getSegmentFilesResponse.files); + assertEquals(0, copyState.refCount()); + assertFalse(replications.isInCopyStateMap(request.getCheckpoint())); + verifyNoInteractions(segmentSegmentFileChunkWriter); + } + + @Override + public void onFailure(Exception e) { + logger.error("Unexpected failure", e); + Assert.fail(); + } + }); + } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index e20fd347fc3fd..cca27366f7df0 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -183,6 +183,8 @@ import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; @@ -1840,6 +1842,12 @@ public void onFailure(final Exception e) { clusterService, threadPool, new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService), + new SegmentReplicationTargetService( + threadPool, + recoverySettings, + transportService, + new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService) + ), shardStateAction, new NodeMappingRefreshAction(transportService, metadataMappingService), repositoriesService, From 931813f8cf512260430101495905bcd0b7602c11 Mon Sep 17 00:00:00 2001 From: Hauck <67768441+hauck-jvsh@users.noreply.github.com> Date: Fri, 22 Jul 2022 14:37:50 -0300 Subject: [PATCH 031/104] Adds a new parameter, max_analyzer_offset, for the highlighter (#3893) * #3842 adds a new parameter to the highlighter, max_analyzer_offset. When this parameter is provided, highlighting stops at that offset. This prevents the highlighter from going beyond the index maxAnalyzedOffset. Signed-off-by: Hauck * Adds a test for the new parameter Signed-off-by: Hauck * Fix the test added in the previous commit; Signed-off-by: Hauck * This was checking against the wrong field Signed-off-by: Hauck * Only runs the test for the correct version Signed-off-by: Hauck * Skips the test in Elasticsearch as well; Signed-off-by: Hauck * Remove elastic 3.0 to test Signed-off-by: Hauck * Skips all versions Signed-off-by: Hauck * Remove unnecessary fields as pointed out by @reta Signed-off-by: Hauck * Compute if fieldMaxAnalyzedIsNotValid in the constructor as suggested by @reta Signed-off-by: Hauck * As discussed, it is better to throw different exceptions for when the fieldMaxAnalyzed is not valid and for when it is disabled; Signed-off-by: Hauck * Hint at what to do to allow highlighting of bigger documents Signed-off-by: Hauck * Let the user define the new parameter globally for all highlighted fields Signed-off-by: Hauck * Change the fieldMaxAnalyzedOffset to an Integer in order to use null when it is absent in highlight.
This allows the error messages to be much more precise, showing invalid for all negative numbers; Signed-off-by: Hauck * Update javadocs and implement the stream methods for the new fields; Signed-off-by: Hauck * builder.field does not accept null, so a check before calling the method is necessary Signed-off-by: Hauck * Only send and read the new fields if the version supports it Signed-off-by: Hauck * The previous commit was checking the wrong field Signed-off-by: Hauck * Check for version 3.0.0 instead of current version Signed-off-by: Hauck * Update server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java Co-authored-by: Andriy Redko Signed-off-by: Hauck * Execute the test after version 3.0.0 Signed-off-by: Hauck Co-authored-by: Andriy Redko --- .../AnnotatedTextHighlighterTests.java | 5 +-- .../30_max_analyzed_offset.yml | 11 ++++++ .../uhighlight/CustomUnifiedHighlighter.java | 23 +++++++++++-- .../highlight/AbstractHighlighterBuilder.java | 34 +++++++++++++++++++ .../subphase/highlight/HighlightBuilder.java | 4 +++ .../highlight/SearchHighlightContext.java | 18 ++++++++++ .../highlight/UnifiedHighlighter.java | 25 +++++++++++++- .../CustomUnifiedHighlighterTests.java | 5 +-- 8 files changed, 117 insertions(+), 8 deletions(-) diff --git a/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java index d2d009f1b28b8..17a1abc64e8cd 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -56,6 +55,7 @@ import org.apache.lucene.search.uhighlight.Snippet; import org.apache.lucene.search.uhighlight.SplittingBreakIterator; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.Strings; import org.opensearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; import org.opensearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; @@ -136,7 +136,8 @@ private void assertHighlightOneDoc( noMatchSize, expectedPassages.length, name -> "text".equals(name), - Integer.MAX_VALUE + Integer.MAX_VALUE, + null ); highlighter.setFieldMatcher((name) -> "text".equals(name)); final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index 462f4f5d25e0b..c38afc96590e9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -36,6 +36,17 @@ setup: body: {"query" : {"match" : {"field1" : "fox"}},
"highlight" : {"type" : "unified", "fields" : {"field1" : {}}}} - match: { error.root_cause.0.type: "illegal_argument_exception" } +--- +"Unified highlighter on a field WITHOUT OFFSETS using max_analyzer_offset should SUCCEED": + - skip: + version: " - 2.99.99" + reason: only starting supporting the parameter max_analyzer_offset on version 3.0 + - do: + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field1" : "quick"}}, "highlight" : {"type" : "unified", "fields" : {"field1" : {"max_analyzer_offset": 10}}}} + - match: {hits.hits.0.highlight.field1.0: "The quick brown fox went to the forest and saw another fox."} --- "Plain highlighter on a field WITHOUT OFFSETS exceeding index.highlight.max_analyzed_offset should FAIL": diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java index fb22eb583d9e1..cd4ee121f0f29 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -43,7 +43,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.uhighlight.UnifiedHighlighter.HighlightFlag; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; @@ -79,6 +78,7 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter { private final int noMatchSize; private final FieldHighlighter fieldHighlighter; private final int maxAnalyzedOffset; + private final Integer fieldMaxAnalyzedOffset; /** * Creates a new instance of {@link CustomUnifiedHighlighter} @@ -99,6 +99,7 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter { * @param fieldMatcher decides which terms should be highlighted * @param maxAnalyzedOffset if the field is more than this long we'll refuse to use the ANALYZED * offset source for it because it'd be super slow + * @param fieldMaxAnalyzedOffset this is used to limit the length of input that will be ANALYZED, this allows bigger fields to be partially highligthed */ public CustomUnifiedHighlighter( IndexSearcher searcher, @@ -113,7 +114,8 @@ public CustomUnifiedHighlighter( int noMatchSize, int maxPassages, Predicate fieldMatcher, - int maxAnalyzedOffset + int maxAnalyzedOffset, + Integer fieldMaxAnalyzedOffset ) throws IOException { super(searcher, analyzer); this.offsetSource = offsetSource; @@ -126,6 +128,7 @@ public CustomUnifiedHighlighter( this.setFieldMatcher(fieldMatcher); this.maxAnalyzedOffset = maxAnalyzedOffset; fieldHighlighter = getFieldHighlighter(field, query, extractTerms(query), maxPassages); + this.fieldMaxAnalyzedOffset = fieldMaxAnalyzedOffset; } /** @@ -141,7 +144,21 @@ public Snippet[] highlightField(LeafReader reader, int docId, CheckedSupplier maxAnalyzedOffset)) { + + if (fieldMaxAnalyzedOffset != null && fieldMaxAnalyzedOffset > maxAnalyzedOffset) { + throw new IllegalArgumentException( + "max_analyzer_offset has exceeded [" + + maxAnalyzedOffset + + "] - maximum allowed to be analyzed for highlighting. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() + + "] index level setting. " + + "For large texts, indexing with offsets or term vectors is recommended!" 
+ ); + } + // if fieldMaxAnalyzedOffset is not defined, + // fall back to the previous behavior + if ((offsetSource == OffsetSource.ANALYSIS) && (fieldValueLength > maxAnalyzedOffset && fieldMaxAnalyzedOffset == null)) { throw new IllegalArgumentException( "The length of [" + field diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index e935d46ea1fb0..db0089fd5f180 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; +import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.Strings; @@ -92,6 +93,7 @@ public abstract class AbstractHighlighterBuilder template, QueryBuilder queryBuilder) { @@ -150,6 +154,7 @@ protected AbstractHighlighterBuilder(AbstractHighlighterBuilder template, Que phraseLimit = template.phraseLimit; options = template.options; requireFieldMatch = template.requireFieldMatch; + maxAnalyzerOffset = template.maxAnalyzerOffset; } /** @@ -181,7 +186,13 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { options(in.readMap()); } + requireFieldMatch(in.readOptionalBoolean()); + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + maxAnalyzerOffset(in.readOptionalVInt()); + } + } /** @@ -223,6 +234,9 @@ public final void writeTo(StreamOutput out) throws IOException { out.writeMap(options); } out.writeOptionalBoolean(requireFieldMatch); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalVInt(maxAnalyzerOffset); + } doWriteTo(out); } @@ -542,6 +556,21 @@ public Integer phraseLimit() { return this.phraseLimit; } + /** + * Sets the maximum offset for the highlighter + * @param maxAnalyzerOffset the maximum offset that the highlighter will consider + * @return this for chaining + */ + @SuppressWarnings("unchecked") + public HB maxAnalyzerOffset(Integer maxAnalyzerOffset) { + this.maxAnalyzerOffset = maxAnalyzerOffset; + return (HB) this; + } + + public Integer maxAnalyzerOffset() { + return this.maxAnalyzerOffset; + } + /** * Forces the highlighting to highlight fields based on the source even if fields are stored separately.
*/ @@ -623,6 +652,10 @@ void commonOptionsToXContent(XContentBuilder builder) throws IOException { if (phraseLimit != null) { builder.field(PHRASE_LIMIT_FIELD.getPreferredName(), phraseLimit); } + if (maxAnalyzerOffset != null) { + builder.field(MAX_ANALYZER_OFFSET_FIELD.getPreferredName(), maxAnalyzerOffset); + } + } static > BiFunction setupParser(ObjectParser parser) { @@ -642,6 +675,7 @@ static > BiFunction { try { return p.map(); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java index a8a7d290c827e..7e7e240362a1a 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -399,6 +399,10 @@ private static void transferOptions( if (highlighterBuilder.highlightQuery != null) { targetOptionsBuilder.highlightQuery(highlighterBuilder.highlightQuery.toQuery(context)); } + if (highlighterBuilder.maxAnalyzerOffset != null) { + targetOptionsBuilder.maxAnalyzerOffset(highlighterBuilder.maxAnalyzerOffset); + } + } static Character[] convertCharArray(char[] array) { diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java index 33a5397ae964b..7464ba094b97e 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java @@ -154,6 +154,12 @@ public static class FieldOptions { private int phraseLimit = -1; + private Integer maxAnalyzerOffset = null; + + public Integer maxAnalyzerOffset() { + return maxAnalyzerOffset; + } + public int fragmentCharSize() { return fragmentCharSize; } @@ -333,6 +339,15 @@ Builder phraseLimit(int phraseLimit) { return this; } + Builder maxAnalyzerOffset(Integer maxAnalyzerOffset) { + // throw an exception if the value is not a positive integer + if (maxAnalyzerOffset != null && maxAnalyzerOffset <= 0) { + throw new IllegalArgumentException("the value [" + maxAnalyzerOffset + "] of max_analyzer_offset is invalid"); + } + fieldOptions.maxAnalyzerOffset = maxAnalyzerOffset; + return this; + } + Builder matchedFields(Set matchedFields) { fieldOptions.matchedFields = matchedFields; return this; @@ -405,6 +420,9 @@ Builder merge(FieldOptions globalOptions) { if (fieldOptions.phraseLimit == -1) { fieldOptions.phraseLimit = globalOptions.phraseLimit; } + if (fieldOptions.maxAnalyzerOffset == null) { + fieldOptions.maxAnalyzerOffset = globalOptions.maxAnalyzerOffset; + } return this; } } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index ddf32064d7f59..5efaa7c9f766b 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -32,6 +32,8 @@ package org.opensearch.search.fetch.subphase.highlight; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.AnalyzerWrapper; +import org.apache.lucene.analysis.miscellaneous.LimitTokenOffsetFilter; import org.apache.lucene.search.IndexSearcher; import
org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.uhighlight.BoundedBreakIteratorScanner; @@ -133,13 +135,33 @@ public HighlightField highlight(FieldHighlightContext fieldContext) throws IOExc return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments)); } + public AnalyzerWrapper getLimitedOffsetAnalyzer(Analyzer analyzer, int limit) { + return new AnalyzerWrapper(analyzer.getReuseStrategy()) { + @Override + protected Analyzer getWrappedAnalyzer(String fieldName) { + return analyzer; + } + + @Override + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + return new TokenStreamComponents(components.getSource(), new LimitTokenOffsetFilter(components.getTokenStream(), limit)); + } + + }; + + } + CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) throws IOException { Encoder encoder = fieldContext.field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT; int maxAnalyzedOffset = fieldContext.context.getIndexSettings().getHighlightMaxAnalyzedOffset(); + Integer fieldMaxAnalyzedOffset = fieldContext.field.fieldOptions().maxAnalyzerOffset(); int numberOfFragments = fieldContext.field.fieldOptions().numberOfFragments(); Analyzer analyzer = getAnalyzer(fieldContext.context.mapperService().documentMapper()); + if (fieldMaxAnalyzedOffset != null) { + analyzer = getLimitedOffsetAnalyzer(analyzer, fieldMaxAnalyzedOffset); + } PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder); IndexSearcher searcher = fieldContext.context.searcher(); OffsetSource offsetSource = getOffsetSource(fieldContext.fieldType); @@ -174,7 +196,8 @@ CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) th fieldContext.field.fieldOptions().noMatchSize(), higlighterNumberOfFragments, fieldMatcher(fieldContext), - maxAnalyzedOffset + maxAnalyzedOffset, + fieldMaxAnalyzedOffset ); } diff --git a/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 5383a153034e9..ba4d16c87bed1 100644 --- a/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -43,7 +43,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; @@ -63,6 +62,7 @@ import org.apache.lucene.search.uhighlight.Snippet; import org.apache.lucene.search.uhighlight.UnifiedHighlighter; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.Strings; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; import org.opensearch.test.OpenSearchTestCase; @@ -117,7 +117,8 @@ private void assertHighlightOneDoc( noMatchSize, expectedPassages.length, name -> "text".equals(name), - Integer.MAX_VALUE + Integer.MAX_VALUE, + null ); final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue); 
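// A hedged usage sketch, not part of this test: with the builder option added in this
// patch, a per-field analyzer offset cap could be requested roughly like this,
// assuming an index that has a "field1" text field:
//
//     new SearchSourceBuilder().highlighter(
//         new HighlightBuilder().field(new HighlightBuilder.Field("field1").maxAnalyzerOffset(10))
//     );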
assertEquals(snippets.length, expectedPassages.length); From 5db75c1fd754ed4e8d4fafa1f8dc96989a907ad4 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Fri, 22 Jul 2022 11:03:10 -0700 Subject: [PATCH 032/104] Deprecate public methods and variables that contain 'master' terminology in 'test/framework' directory (#3978) * Deprecate methods with 'master' in the name, mainly in the classes 'InternalTestCluster', 'NodeRoles', and 'AbstractSnapshotIntegTestCase' * Deprecate variables that contain 'master' in the name in the class 'InternalTestCluster' Signed-off-by: Tianli Feng --- .../ingest/common/IngestRestartIT.java | 2 +- .../reindex/DeleteByQueryBasicTests.java | 4 +- .../opensearch/index/reindex/RetryTests.java | 2 +- ...eCloudStorageBlobStoreRepositoryTests.java | 2 +- .../s3/S3BlobStoreRepositoryTests.java | 2 +- .../admin/cluster/node/tasks/TasksIT.java | 4 +- ...ansportClusterStateActionDisruptionIT.java | 4 +- .../admin/indices/create/CreateIndexIT.java | 2 +- .../admin/indices/create/ShrinkIndexIT.java | 2 +- .../IndexingClusterManagerFailoverIT.java | 2 +- ...tReplicationActionRetryOnClosedNodeIT.java | 2 +- .../opensearch/cluster/ClusterHealthIT.java | 12 +- .../cluster/ClusterInfoServiceIT.java | 10 +- .../cluster/MinimumClusterManagerNodesIT.java | 15 +- .../SpecificClusterManagerNodesIT.java | 34 +-- .../cluster/UpdateSettingsValidationIT.java | 4 +- .../action/shard/ShardStateActionIT.java | 2 +- .../coordination/RareClusterStateIT.java | 12 +- .../UnsafeBootstrapAndDetachCommandIT.java | 18 +- .../coordination/VotingConfigurationIT.java | 6 +- .../cluster/coordination/ZenDiscoveryIT.java | 8 +- .../decider/DiskThresholdDeciderIT.java | 16 +- .../allocation/decider/MockDiskUsagesIT.java | 6 +- .../discovery/ClusterDisruptionIT.java | 6 +- .../discovery/ClusterManagerDisruptionIT.java | 8 +- .../discovery/DiscoveryDisruptionIT.java | 6 +- .../discovery/SnapshotDisruptionIT.java | 14 +- .../StableClusterManagerDisruptionIT.java | 12 +- .../index/mapper/DynamicMappingIT.java | 2 +- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../recovery/ReplicaToPrimaryPromotionIT.java | 4 +- .../indices/settings/UpdateSettingsIT.java | 2 +- .../state/CloseWhileRelocatingShardsIT.java | 5 +- .../indices/state/ReopenWhileClosingIT.java | 2 +- .../store/IndicesStoreIntegrationIT.java | 10 +- .../persistent/PersistentTasksExecutorIT.java | 4 +- .../decider/EnableAssignmentDeciderIT.java | 2 +- .../repositories/RepositoriesServiceIT.java | 4 +- .../BlobStoreRepositoryCleanupIT.java | 16 +- .../SearchScrollWithFailingNodesIT.java | 2 +- .../opensearch/snapshots/CloneSnapshotIT.java | 34 +-- .../snapshots/ConcurrentSnapshotsIT.java | 84 +++---- .../CorruptedBlobStoreRepositoryIT.java | 17 +- .../DedicatedClusterSnapshotRestoreIT.java | 20 +- ...etadataLoadingDuringSnapshotRestoreIT.java | 2 +- .../opensearch/snapshots/RepositoriesIT.java | 4 +- .../RepositoryFilterUserMetadataIT.java | 2 +- .../SharedClusterSnapshotRestoreIT.java | 4 +- .../snapshots/SnapshotStatusApisIT.java | 2 +- ...BootstrapServiceDeprecatedMasterTests.java | 4 +- .../ClusterBootstrapServiceTests.java | 4 +- .../coordination/CoordinatorTests.java | 4 +- .../routing/MovePrimaryFirstTests.java | 2 +- .../opensearch/env/NodeEnvironmentTests.java | 6 +- .../env/NodeRepurposeCommandTests.java | 8 +- .../GatewayMetaStatePersistedStateTests.java | 4 +- .../gateway/GatewayServiceTests.java | 4 +- .../transport/ConnectionProfileTests.java | 4 +- .../transport/RemoteClusterServiceTests.java | 4 +- .../blobstore/BlobStoreTestUtil.java | 2 +-
...earchBlobStoreRepositoryIntegTestCase.java | 11 +- .../AbstractSnapshotIntegTestCase.java | 62 ++++-- .../opensearch/test/InternalTestCluster.java | 206 +++++++++++++++--- .../java/org/opensearch/test/NodeRoles.java | 46 +++- .../test/OpenSearchIntegTestCase.java | 2 +- .../BlockMasterServiceOnMaster.java | 2 +- .../BusyMasterServiceDisruption.java | 2 +- .../test/rest/yaml/ClientYamlTestClient.java | 6 + .../yaml/ClientYamlTestExecutionContext.java | 8 +- .../test/rest/yaml/section/DoSection.java | 2 +- .../test/test/InternalTestClusterTests.java | 4 +- 71 files changed, 539 insertions(+), 299 deletions(-) diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java index c662843b91ebb..f27c903d8795f 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java @@ -166,7 +166,7 @@ public void testScriptDisabled() throws Exception { checkPipelineExists.accept(pipelineIdWithScript); checkPipelineExists.accept(pipelineIdWithoutScript); - internalCluster().restartNode(internalCluster().getMasterName(), new InternalTestCluster.RestartCallback() { + internalCluster().restartNode(internalCluster().getClusterManagerName(), new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java index 21bbb02fb147c..baf3c83bd0050 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java @@ -274,9 +274,9 @@ public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception { InternalTestCluster internalTestCluster = internalCluster(); InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance( ClusterInfoService.class, - internalTestCluster.getMasterName() + internalTestCluster.getClusterManagerName() ); - ThreadPool threadPool = internalTestCluster.getInstance(ThreadPool.class, internalTestCluster.getMasterName()); + ThreadPool threadPool = internalTestCluster.getInstance(ThreadPool.class, internalTestCluster.getClusterManagerName()); // Refresh the cluster info after a random delay to check the disk threshold and release the block on the index threadPool.schedule(infoService::refresh, TimeValue.timeValueMillis(randomIntBetween(1, 100)), ThreadPool.Names.MANAGEMENT); // The delete by query request will be executed successfully because the block will be released diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java index 55234bcaaf0e2..0b052f8fd57a3 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java @@ -206,7 +206,7 @@ private void testCase( assertFalse(initialBulkResponse.buildFailureMessage(), initialBulkResponse.hasFailures()); client().admin().indices().prepareRefresh("source").get(); - AbstractBulkByScrollRequestBuilder builder = 
request.apply(internalCluster().masterClient()); + AbstractBulkByScrollRequestBuilder builder = request.apply(internalCluster().clusterManagerClient()); // Make sure we use more than one batch so we have to scroll builder.source().setSize(DOC_COUNT / randomIntBetween(2, 10)); diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 274a416d57431..c0a396724897f 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -130,7 +130,7 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testDeleteSingleItem() { final String repoName = createRepository(randomName()); - final RepositoriesService repositoriesService = internalCluster().getMasterNodeInstance(RepositoriesService.class); + final RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); PlainActionFuture.get( f -> repository.threadPool() diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index e31a9f8cf3856..b35d4080a413a 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -172,7 +172,7 @@ public void testEnforcedCooldownPeriod() throws IOException { .get() .getSnapshotInfo() .snapshotId(); - final RepositoriesService repositoriesService = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class); + final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); final RepositoryData repositoryData = getRepositoryData(repository); final RepositoryData modifiedRepositoryData = repositoryData.withVersions( diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index 9c6c67116ef8a..d37285211f774 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -158,14 +158,14 @@ public void testClusterManagerNodeOperationTasks() { registerTaskManagerListeners(ClusterHealthAction.NAME); // First run the health on the cluster-manager node - should produce only one task on the cluster-manager node - internalCluster().masterClient().admin().cluster().prepareHealth().get(); + internalCluster().clusterManagerClient().admin().cluster().prepareHealth().get(); assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events 
assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events resetTaskManagerListeners(ClusterHealthAction.NAME); // Now run the health on a non-cluster-manager node - should produce one task on cluster-manager and one task on another node - internalCluster().nonMasterClient().admin().cluster().prepareHealth().get(); + internalCluster().nonClusterManagerClient().admin().cluster().prepareHealth().get(); assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events List tasks = findEvents(ClusterHealthAction.NAME, Tuple::v1); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index 3be75d672d823..1a0cad36c8f04 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -194,7 +194,7 @@ public void runRepeatedlyWhileChangingClusterManager(Runnable runnable) throws E ) ); - final String clusterManagerName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getClusterManagerName(); final AtomicBoolean shutdown = new AtomicBoolean(); final Thread assertingThread = new Thread(() -> { @@ -245,7 +245,7 @@ public void runRepeatedlyWhileChangingClusterManager(Runnable runnable) throws E clusterManagerName, () -> randomFrom(internalCluster().getNodeNames()) ); - final String claimedClusterManagerName = internalCluster().getMasterName(nonClusterManagerNode); + final String claimedClusterManagerName = internalCluster().getClusterManagerName(nonClusterManagerNode); assertThat(claimedClusterManagerName, not(equalTo(clusterManagerName))); }); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index 3ef2a63c7d0ac..51ef63ae9e9c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -340,7 +340,7 @@ public void testFailureToCreateIndexCleansUpIndicesService() { IllegalStateException.class ); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, internalCluster().getMasterName()); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, internalCluster().getClusterManagerName()); for (IndexService indexService : indicesService) { assertThat(indexService.index().getName(), not("test-idx-2")); } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java index e8a6c68a41076..daa124fab2220 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java @@ 
-496,7 +496,7 @@ public void testCreateShrinkIndexFails() throws Exception { final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance( ClusterInfoService.class, - internalCluster().getMasterName() + internalCluster().getClusterManagerName() ); infoService.refresh(); // kick off a retry and wait until it's done! diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java index 60ba9a4bfcf98..959b16d4c4694 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java @@ -67,7 +67,7 @@ public void testClusterManagerFailoverDuringIndexingWithMappingChanges() throws internalCluster().setBootstrapClusterManagerNodeIndex(2); - internalCluster().startMasterOnlyNodes(3, Settings.EMPTY); + internalCluster().startClusterManagerOnlyNodes(3, Settings.EMPTY); String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java index 8a9f3eb5f2e63..725cc894185e3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java @@ -203,7 +203,7 @@ public void sendRequest( } public void testRetryOnStoppedTransportService() throws Exception { - internalCluster().startMasterOnlyNodes(2); + internalCluster().startClusterManagerOnlyNodes(2); String primary = internalCluster().startDataOnlyNode(); assertAcked( prepareCreate("test").setSettings( diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java index 14eaeb1e6dfcf..56f290ef4b50c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java @@ -307,7 +307,10 @@ public void testWaitForEventsRetriesIfOtherConditionsNotMet() { .execute(); final AtomicBoolean keepSubmittingTasks = new AtomicBoolean(true); - final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + final ClusterService clusterService = internalCluster().getInstance( + ClusterService.class, + internalCluster().getClusterManagerName() + ); final PlainActionFuture completionFuture = new PlainActionFuture<>(); clusterService.submitStateUpdateTask("looping task", new ClusterStateUpdateTask(Priority.LOW) { @Override @@ -377,7 +380,7 @@ public void testHealthOnClusterManagerFailover() throws Exception { .setClusterManagerNodeTimeout(TimeValue.timeValueMinutes(2)) .execute() ); - internalCluster().restartNode(internalCluster().getMasterName(), InternalTestCluster.EMPTY_CALLBACK); + internalCluster().restartNode(internalCluster().getClusterManagerName(), InternalTestCluster.EMPTY_CALLBACK); } if 
(withIndex) { assertAcked( @@ -396,7 +399,10 @@ public void testHealthOnClusterManagerFailover() throws Exception { public void testWaitForEventsTimesOutIfClusterManagerBusy() { final AtomicBoolean keepSubmittingTasks = new AtomicBoolean(true); - final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + final ClusterService clusterService = internalCluster().getInstance( + ClusterService.class, + internalCluster().getClusterManagerName() + ); final PlainActionFuture completionFuture = new PlainActionFuture<>(); clusterService.submitStateUpdateTask("looping task", new ClusterStateUpdateTask(Priority.LOW) { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index dae9505fe67bf..b133b864a6b82 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -169,7 +169,7 @@ public void testClusterInfoServiceCollectsInformation() { // Get the cluster info service on the cluster-manager node final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance( ClusterInfoService.class, - internalTestCluster.getMasterName() + internalTestCluster.getClusterManagerName() ); infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); ClusterInfo info = infoService.refresh(); @@ -193,7 +193,7 @@ public void testClusterInfoServiceCollectsInformation() { logger.info("--> shard size: {}", size.value); assertThat("shard size is greater than 0", size.value, greaterThanOrEqualTo(0L)); } - ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getMasterName()); + ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getClusterManagerName()); ClusterState state = clusterService.state(); for (ShardRouting shard : state.routingTable().allShards()) { String dataPath = info.getDataPath(shard); @@ -221,7 +221,7 @@ public void testClusterInfoServiceInformationClearOnError() { InternalTestCluster internalTestCluster = internalCluster(); InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance( ClusterInfoService.class, - internalTestCluster.getMasterName() + internalTestCluster.getClusterManagerName() ); // get one healthy sample ClusterInfo info = infoService.refresh(); @@ -231,7 +231,7 @@ public void testClusterInfoServiceInformationClearOnError() { MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, - internalTestCluster.getMasterName() + internalTestCluster.getClusterManagerName() ); final AtomicBoolean timeout = new AtomicBoolean(false); @@ -272,7 +272,7 @@ public void testClusterInfoServiceInformationClearOnError() { // now we cause an exception timeout.set(false); - ActionFilters actionFilters = internalTestCluster.getInstance(ActionFilters.class, internalTestCluster.getMasterName()); + ActionFilters actionFilters = internalTestCluster.getInstance(ActionFilters.class, internalTestCluster.getClusterManagerName()); BlockingActionFilter blockingActionFilter = null; for (ActionFilter filter : actionFilters.filters()) { if (filter instanceof BlockingActionFilter) { diff --git 
a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index c431dfaa14fa0..0bed559085b27 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -151,7 +151,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { ); } - String clusterManagerNode = internalCluster().getMasterName(); + String clusterManagerNode = internalCluster().getClusterManagerName(); String otherNode = node1Name.equals(clusterManagerNode) ? node2Name : node1Name; logger.info("--> add voting config exclusion for non-cluster-manager node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(otherNode)).get(); @@ -204,7 +204,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { clearRequest.setWaitForRemoval(false); client().execute(ClearVotingConfigExclusionsAction.INSTANCE, clearRequest).get(); - clusterManagerNode = internalCluster().getMasterName(); + clusterManagerNode = internalCluster().getClusterManagerName(); otherNode = node1Name.equals(clusterManagerNode) ? node2Name : node1Name; logger.info("--> add voting config exclusion for cluster-manager node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(clusterManagerNode)).get(); @@ -310,12 +310,15 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { } List nonClusterManagerNodes = new ArrayList<>( - Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), Collections.singleton(internalCluster().getMasterName())) + Sets.difference( + Sets.newHashSet(internalCluster().getNodeNames()), + Collections.singleton(internalCluster().getClusterManagerName()) + ) ); Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0)); Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1)); - internalCluster().stopRandomNonMasterNode(); - internalCluster().stopRandomNonMasterNode(); + internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomNonClusterManagerNode(); logger.info("--> verify that there is no cluster-manager anymore on remaining node"); // spin here to wait till the state is set @@ -347,7 +350,7 @@ public void testCannotCommitStateThreeNodes() throws Exception { internalCluster().startNodes(3, settings); ensureStableCluster(3); - final String clusterManager = internalCluster().getMasterName(); + final String clusterManager = internalCluster().getClusterManagerName(); Set otherNodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); otherNodes.remove(clusterManager); NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java index 073f006d105f2..3c3d10d5e2bc2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java @@ -45,8 +45,8 @@ import 
java.io.IOException; +import static org.opensearch.test.NodeRoles.clusterManagerNode; import static org.opensearch.test.NodeRoles.dataOnlyNode; -import static org.opensearch.test.NodeRoles.masterNode; import static org.opensearch.test.NodeRoles.nonDataNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -79,7 +79,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { logger.info("--> start cluster-manager node"); final String clusterManagerNodeName = internalCluster().startClusterManagerOnlyNode(); assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareState() @@ -92,7 +92,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { equalTo(clusterManagerNodeName) ); assertThat( - internalCluster().masterClient() + internalCluster().clusterManagerClient() .admin() .cluster() .prepareState() @@ -106,8 +106,8 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { ); logger.info("--> stop cluster-manager node"); - Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); - internalCluster().stopCurrentMasterNode(); + Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(internalCluster().getClusterManagerName()); + internalCluster().stopCurrentClusterManagerNode(); try { assertThat( @@ -129,10 +129,10 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { logger.info("--> start previous cluster-manager node again"); final String nextClusterManagerEligibleNodeName = internalCluster().startNode( - Settings.builder().put(nonDataNode(masterNode())).put(clusterManagerDataPathSettings) + Settings.builder().put(nonDataNode(clusterManagerNode())).put(clusterManagerDataPathSettings) ); assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareState() @@ -145,7 +145,7 @@ public void testSimpleOnlyClusterManagerNodeElection() throws IOException { equalTo(nextClusterManagerEligibleNodeName) ); assertThat( - internalCluster().masterClient() + internalCluster().clusterManagerClient() .admin() .cluster() .prepareState() @@ -183,7 +183,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { logger.info("--> start cluster-manager node (1)"); final String clusterManagerNodeName = internalCluster().startClusterManagerOnlyNode(); assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareState() @@ -196,7 +196,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { equalTo(clusterManagerNodeName) ); assertThat( - internalCluster().masterClient() + internalCluster().clusterManagerClient() .admin() .cluster() .prepareState() @@ -212,7 +212,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { logger.info("--> start cluster-manager node (2)"); final String nextClusterManagerEligableNodeName = internalCluster().startClusterManagerOnlyNode(); assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareState() @@ -225,7 +225,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { equalTo(clusterManagerNodeName) ); assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() 
.cluster() .prepareState() @@ -238,7 +238,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { equalTo(clusterManagerNodeName) ); assertThat( - internalCluster().masterClient() + internalCluster().clusterManagerClient() .admin() .cluster() .prepareState() @@ -256,7 +256,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { // removing the cluster-manager from the voting configuration immediately triggers the cluster-manager to step down assertBusy(() -> { assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareState() @@ -269,7 +269,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { equalTo(nextClusterManagerEligableNodeName) ); assertThat( - internalCluster().masterClient() + internalCluster().clusterManagerClient() .admin() .cluster() .prepareState() @@ -284,7 +284,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { }); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNodeName)); assertThat( - internalCluster().nonMasterClient() + internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareState() @@ -297,7 +297,7 @@ public void testElectOnlyBetweenClusterManagerNodes() throws Exception { equalTo(nextClusterManagerEligableNodeName) ); assertThat( - internalCluster().masterClient() + internalCluster().clusterManagerClient() .admin() .cluster() .prepareState() diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/UpdateSettingsValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/UpdateSettingsValidationIT.java index 66f22a4fdf1b3..4b830acddefdf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/UpdateSettingsValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/UpdateSettingsValidationIT.java @@ -39,14 +39,14 @@ import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.NodeRoles.nonDataNode; -import static org.opensearch.test.NodeRoles.nonMasterNode; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class UpdateSettingsValidationIT extends OpenSearchIntegTestCase { public void testUpdateSettingsValidation() throws Exception { - internalCluster().startNodes(nonDataNode(), nonMasterNode(), nonMasterNode()); + internalCluster().startNodes(nonDataNode(), nonClusterManagerNode(), nonClusterManagerNode()); createIndex("test"); NumShards test = getNumShards("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java index b7e895f38ba19..e0d070c385def 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java @@ -123,7 +123,7 @@ public void testFollowupRerouteCanBeSetToHigherPriority() { final AtomicBoolean stopSpammingClusterManager = new AtomicBoolean(); final ClusterService clusterManagerClusterService = internalCluster().getInstance( ClusterService.class, - internalCluster().getMasterName() + internalCluster().getClusterManagerName() ); 
clusterManagerClusterService.submitStateUpdateTask("spam", new ClusterStateUpdateTask(Priority.HIGH) { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index 90b9ff7cab3b5..3b99ce73adf5b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ -105,7 +105,7 @@ public void testAssignmentWithJustAddedNodes() { // close to have some unassigned started shards shards.. client().admin().indices().prepareClose(index).get(); - final String clusterManagerName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getClusterManagerName(); final ClusterService clusterService = internalCluster().clusterService(clusterManagerName); final AllocationService allocationService = internalCluster().getInstance(AllocationService.class, clusterManagerName); clusterService.submitStateUpdateTask("test-inject-node-and-reroute", new ClusterStateUpdateTask() { @@ -159,7 +159,7 @@ private ActionFuture { assertFalse(clusterManagerCoordinator.publicationInProgress()); final long applierVersion = clusterManagerCoordinator.getApplierState().version(); @@ -202,7 +202,9 @@ public void testDeleteCreateInOneBulk() throws Exception { ensureGreen(TimeValue.timeValueMinutes(30), "test"); // due to publish_timeout of 0, wait for data node to have cluster state fully applied assertBusy(() -> { - long clusterManagerClusterStateVersion = internalCluster().clusterService(internalCluster().getMasterName()).state().version(); + long clusterManagerClusterStateVersion = internalCluster().clusterService(internalCluster().getClusterManagerName()) + .state() + .version(); long dataClusterStateVersion = internalCluster().clusterService(dataNode).state().version(); assertThat(clusterManagerClusterStateVersion, equalTo(dataClusterStateVersion)); }); @@ -220,7 +222,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); - final String clusterManager = internalCluster().getMasterName(); + final String clusterManager = internalCluster().getClusterManagerName(); assertThat(nodeNames, hasItem(clusterManager)); String otherNode = null; for (String node : nodeNames) { @@ -308,7 +310,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); - final String clusterManager = internalCluster().getMasterName(); + final String clusterManager = internalCluster().getClusterManagerName(); assertThat(nodeNames, hasItem(clusterManager)); String otherNode = null; for (String node : nodeNames) { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 72e24b0396695..0bbeeea9e27a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -62,7 +62,7 @@ import static org.opensearch.gateway.DanglingIndicesState.AUTO_IMPORT_DANGLING_INDICES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -175,7 +175,7 @@ private void removeBlock() { public void testBootstrapNotClusterManagerEligible() { final Environment environment = TestEnvironment.newEnvironment( - Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build() + Settings.builder().put(nonClusterManagerNode(internalCluster().getDefaultSettings())).build() ); expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.NOT_CLUSTER_MANAGER_NODE_MSG); } @@ -238,7 +238,7 @@ public void testBootstrapNoClusterState() throws IOException { String node = internalCluster().startNode(); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + NodeEnvironment nodeEnvironment = internalCluster().getClusterManagerNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build() @@ -253,7 +253,7 @@ public void testDetachNoClusterState() throws IOException { String node = internalCluster().startNode(); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + NodeEnvironment nodeEnvironment = internalCluster().getClusterManagerNodeInstance(NodeEnvironment.class); internalCluster().stopRandomDataNode(); Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build() @@ -306,7 +306,7 @@ public void test3ClusterManagerNodes2Failed() throws Exception { ); // node ordinal 1 logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and bootstrap"); - clusterManagerNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 + clusterManagerNodes.addAll(internalCluster().startClusterManagerOnlyNodes(2)); // node ordinals 2 and 3 logger.info("--> wait for all nodes to join the cluster"); ensureStableCluster(4); @@ -351,7 +351,7 @@ public void test3ClusterManagerNodes2Failed() throws Exception { ); logger.info("--> stop 1st cluster-manager-eligible node and data-only node"); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + NodeEnvironment nodeEnvironment = internalCluster().getClusterManagerNodeInstance(NodeEnvironment.class); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNodes.get(0))); assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten()); internalCluster().stopRandomDataNode(); @@ -492,7 +492,7 @@ public void 
testNoInitialBootstrapAfterDetach() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); Settings clusterManagerNodeDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build() @@ -527,7 +527,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( ensureReadOnlyBlock(false, clusterManagerNode); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build() @@ -557,7 +557,7 @@ public void testUnsafeBootstrapWithApplyClusterReadOnlyBlockAsFalse() throws Exc ensureReadOnlyBlock(false, clusterManagerNode); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManagerNodeDataPathSettings).build() diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java index 64474546f6106..de1114e152767 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java @@ -64,12 +64,12 @@ protected Collection> nodePlugins() { public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionException, InterruptedException { internalCluster().setBootstrapClusterManagerNodeIndex(0); internalCluster().startNodes(2); - final String originalClusterManager = internalCluster().getMasterName(); + final String originalClusterManager = internalCluster().getClusterManagerName(); logger.info("--> excluding cluster-manager node {}", originalClusterManager); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(originalClusterManager)).get(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - assertNotEquals(originalClusterManager, internalCluster().getMasterName()); + assertNotEquals(originalClusterManager, internalCluster().getClusterManagerName()); } public void testElectsNodeNotInVotingConfiguration() throws Exception { @@ -134,7 +134,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { } } - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); assertFalse( internalCluster().client() .admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java index 84bf25141d5e0..66ff9eb15ae0e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java @@ -91,10 +91,10 @@ public void 
testNoShardRelocationsOccurWhenElectedClusterManagerNodeFails() thro RecoveryResponse r = client().admin().indices().prepareRecoveries("test").get(); int numRecoveriesBeforeNewClusterManager = r.shardRecoveryStates().get("test").size(); - final String oldClusterManager = internalCluster().getMasterName(); - internalCluster().stopCurrentMasterNode(); + final String oldClusterManager = internalCluster().getClusterManagerName(); + internalCluster().stopCurrentClusterManagerNode(); assertBusy(() -> { - String current = internalCluster().getMasterName(); + String current = internalCluster().getClusterManagerName(); assertThat(current, notNullValue()); assertThat(current, not(equalTo(oldClusterManager))); }); @@ -170,7 +170,7 @@ public void testDiscoveryStats() throws Exception { ensureGreen(); // ensures that all events are processed (in particular state recovery fully completed) assertBusy( () -> assertThat( - internalCluster().clusterService(internalCluster().getMasterName()).getMasterService().numberOfPendingTasks(), + internalCluster().clusterService(internalCluster().getClusterManagerName()).getMasterService().numberOfPendingTasks(), equalTo(0) ) ); // see https://github.com/elastic/elasticsearch/issues/24388 diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index e26d5be03cf7a..45daea3d066d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -160,10 +160,9 @@ public void testHighWatermarkNotExceeded() throws Exception { final String dataNodeName = internalCluster().startDataOnlyNode(); ensureStableCluster(3); - final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance( - ClusterInfoService.class - ); - internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh()); + final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster() + .getCurrentClusterManagerNodeInstance(ClusterInfoService.class); + internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh()); final String dataNode0Id = internalCluster().getInstance(NodeEnvironment.class, dataNodeName).nodeId(); final Path dataNode0Path = internalCluster().getInstance(Environment.class, dataNodeName).dataFiles()[0]; @@ -203,10 +202,9 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); - final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance( - ClusterInfoService.class - ); - internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh()); + final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster() + .getCurrentClusterManagerNodeInstance(ClusterInfoService.class); + internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh()); final String dataNode0Id = 
internalCluster().getInstance(NodeEnvironment.class, dataNodeName).nodeId(); final Path dataNode0Path = internalCluster().getInstance(Environment.class, dataNodeName).dataFiles()[0]; @@ -330,7 +328,7 @@ private long createReasonableSizedShards(final String indexName) throws Interrup } private void refreshDiskUsage() { - final ClusterInfoService clusterInfoService = internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); + final ClusterInfoService clusterInfoService = internalCluster().getCurrentClusterManagerNodeInstance(ClusterInfoService.class); ((InternalClusterInfoService) clusterInfoService).refresh(); // if the nodes were all under the low watermark already (but unbalanced) then a change in the disk usage doesn't trigger a reroute // even though it's now possible to achieve better balance, so we have to do an explicit reroute. TODO fix this? diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 520ad75535033..a7755841c9ea9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -270,7 +270,7 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); final AtomicReference clusterManagerAppliedClusterState = new AtomicReference<>(); - internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> { clusterManagerAppliedClusterState.set(event.state()); clusterInfoService.refresh(); // so that a subsequent reroute sees disk usage according to the current state }); @@ -358,7 +358,7 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { false ).map(RoutingNode::nodeId).collect(Collectors.toList()); - internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { + internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> { assertThat(event.state().getRoutingNodes().node(nodeIds.get(2)).size(), lessThanOrEqualTo(1)); clusterManagerAppliedClusterState.set(event.state()); clusterInfoService.refresh(); // so that a subsequent reroute sees disk usage according to the current state @@ -544,7 +544,7 @@ private Map getShardCountByNodeId() { } private MockInternalClusterInfoService getMockInternalClusterInfoService() { - return (MockInternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); + return (MockInternalClusterInfoService) internalCluster().getCurrentClusterManagerNodeInstance(ClusterInfoService.class); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 915aef5cb1d25..184c866aee2db 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -338,7 +338,7 @@ public void testRejoinDocumentExistsInAllShardCopies() throws Exception { // simulate handling of 
sending shard failure during an isolation public void testSendingShardFailure() throws Exception { List nodes = startCluster(3); - String clusterManagerNode = internalCluster().getMasterName(); + String clusterManagerNode = internalCluster().getClusterManagerName(); List nonClusterManagerNodes = nodes.stream().filter(node -> !node.equals(clusterManagerNode)).collect(Collectors.toList()); String nonClusterManagerNode = randomFrom(nonClusterManagerNodes); assertAcked( @@ -463,12 +463,12 @@ public boolean validateClusterForming() { */ public void testIndicesDeleted() throws Exception { final String idxName = "test"; - final List allClusterManagerEligibleNodes = internalCluster().startMasterOnlyNodes(2); + final List allClusterManagerEligibleNodes = internalCluster().startClusterManagerOnlyNodes(2); final String dataNode = internalCluster().startDataOnlyNode(); ensureStableCluster(3); assertAcked(prepareCreate("test")); - final String clusterManagerNode1 = internalCluster().getMasterName(); + final String clusterManagerNode1 = internalCluster().getClusterManagerName(); NetworkDisruption networkDisruption = new NetworkDisruption( new TwoPartitions(clusterManagerNode1, dataNode), NetworkDisruption.UNRESPONSIVE diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index cd8846067ad1f..412b325b14e73 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -71,7 +71,7 @@ public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase { public void testClusterManagerNodeGCs() throws Exception { List nodes = startCluster(3); - String oldClusterManagerNode = internalCluster().getMasterName(); + String oldClusterManagerNode = internalCluster().getClusterManagerName(); // a very long GC, but it's OK as we remove the disruption when it has had an effect SingleNodeDisruption clusterManagerNodeDisruption = new IntermittentLongGCDisruption( random(), @@ -105,7 +105,7 @@ public void testClusterManagerNodeGCs() throws Exception { ensureStableCluster(3, waitTime, false, oldNonClusterManagerNodes.get(0)); // make sure all nodes agree on cluster-manager - String newClusterManager = internalCluster().getMasterName(); + String newClusterManager = internalCluster().getClusterManagerName(); assertThat(newClusterManager, not(equalTo(oldClusterManagerNode))); assertClusterManager(newClusterManager, nodes); } @@ -126,7 +126,7 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc ); ensureGreen(); - String isolatedNode = internalCluster().getMasterName(); + String isolatedNode = internalCluster().getClusterManagerName(); TwoPartitions partitions = isolateNode(isolatedNode); NetworkDisruption networkDisruption = addRandomDisruptionType(partitions); networkDisruption.startDisrupting(); @@ -296,7 +296,7 @@ public void testMappingTimeout() throws Exception { Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) - .put("index.routing.allocation.exclude._name", internalCluster().getMasterName()) + .put("index.routing.allocation.exclude._name", internalCluster().getClusterManagerName()) .build() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java 
b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index 1825fbdcf32d5..a2864b6dfd1da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -136,7 +136,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { // shutting down the nodes, to avoid the leakage check tripping // on the states associated with the commit requests we may have dropped - internalCluster().stopRandomNonMasterNode(); + internalCluster().stopRandomNonClusterManagerNode(); } public void testClusterFormingWithASlowNode() { @@ -170,7 +170,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception { } internalCluster().clearDisruptionScheme(); ensureStableCluster(3); - final String preferredClusterManagerName = internalCluster().getMasterName(); + final String preferredClusterManagerName = internalCluster().getClusterManagerName(); final DiscoveryNode preferredClusterManager = internalCluster().clusterService(preferredClusterManagerName).localNode(); logger.info("--> preferred cluster-manager is {}", preferredClusterManager); @@ -214,7 +214,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception { public void testNodeNotReachableFromClusterManager() throws Exception { startCluster(3); - String clusterManagerNode = internalCluster().getMasterName(); + String clusterManagerNode = internalCluster().getClusterManagerName(); String nonClusterManagerNode = null; while (nonClusterManagerNode == null) { nonClusterManagerNode = randomFrom(internalCluster().getNodeNames()); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java index 3324b7de077fe..691e3ca51eb8c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java @@ -86,7 +86,7 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testDisruptionAfterFinalization() throws Exception { final String idxName = "test"; - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); ensureStableCluster(4); @@ -94,7 +94,7 @@ public void testDisruptionAfterFinalization() throws Exception { createRepository("test-repo", "fs"); - final String clusterManagerNode1 = internalCluster().getMasterName(); + final String clusterManagerNode1 = internalCluster().getClusterManagerName(); NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.UNRESPONSIVE); internalCluster().setDisruptionScheme(networkDisruption); @@ -168,7 +168,7 @@ public void clusterChanged(ClusterChangedEvent event) { public void testDisruptionAfterShardFinalization() throws Exception { final String idxName = "test"; - internalCluster().startMasterOnlyNodes(1); + internalCluster().startClusterManagerOnlyNodes(1); internalCluster().startDataOnlyNode(); ensureStableCluster(2); createIndex(idxName); @@ -177,7 +177,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { final String repoName = "test-repo"; createRepository(repoName, "mock"); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = 
internalCluster().getClusterManagerName();

        blockAllDataNodes(repoName);
@@ -212,7 +212,7 @@ public void testDisruptionAfterShardFinalization() throws Exception {
         index(idxName, "type", JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject());

         logger.info("--> run a snapshot that fails to finalize but succeeds on the data node");
-        blockMasterFromFinalizingSnapshotOnIndexFile(repoName);
+        blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName);
         final ActionFuture snapshotFuture = client(clusterManagerNode).admin()
             .cluster()
             .prepareCreateSnapshot(repoName, "snapshot-2")
@@ -236,7 +236,7 @@ public void testDisruptionAfterShardFinalization() throws Exception {
     }

     public void testClusterManagerFailOverDuringShardSnapshots() throws Exception {
-        internalCluster().startMasterOnlyNodes(3);
+        internalCluster().startClusterManagerOnlyNodes(3);
         final String dataNode = internalCluster().startDataOnlyNode();
         ensureStableCluster(4);
         final String repoName = "test-repo";
@@ -249,7 +249,7 @@ public void testClusterManagerFailOverDuringShardSnapshots() throws Exception {
         blockDataNode(repoName, dataNode);

         logger.info("--> create snapshot via cluster-manager node client");
-        final ActionFuture snapshotResponse = internalCluster().masterClient()
+        final ActionFuture snapshotResponse = internalCluster().clusterManagerClient()
             .admin()
             .cluster()
             .prepareCreateSnapshot(repoName, "test-snap")
diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java
index 8992e4749c1aa..ffbaa35f61b72 100644
--- a/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java
@@ -92,7 +92,7 @@ public void testFailWithMinimumClusterManagerNodesConfigured() throws Exception
         ensureStableCluster(3);

         // Figure out what is the elected cluster-manager node
-        final String clusterManagerNode = internalCluster().getMasterName();
+        final String clusterManagerNode = internalCluster().getClusterManagerName();
         logger.info("---> legit elected cluster-manager node={}", clusterManagerNode);

         // Pick a node that isn't the elected cluster-manager.
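The hunks in this file, like the disruption tests above, all follow the same fail-over choreography: record the elected cluster-manager, disrupt or stop it, then assert that a different node wins the next election. A minimal sketch of that pattern, assuming an OpenSearchIntegTestCase context where internalCluster(), assertBusy and the Hamcrest matchers are in scope (illustrative only, mirroring the ZenDiscoveryIT hunk earlier in this patch):

    final String oldClusterManager = internalCluster().getClusterManagerName();
    internalCluster().stopCurrentClusterManagerNode();
    // Election is asynchronous, so poll until a different node reports itself elected.
    assertBusy(() -> {
        String current = internalCluster().getClusterManagerName();
        assertThat(current, notNullValue());
        assertThat(current, not(equalTo(oldClusterManager)));
    });
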
@@ -123,7 +123,7 @@ public void testFailWithMinimumClusterManagerNodesConfigured() throws Exception ensureStableCluster(3); // The elected cluster-manager shouldn't have changed, since the unlucky node never could have elected itself as cluster-manager - assertThat(internalCluster().getMasterName(), equalTo(clusterManagerNode)); + assertThat(internalCluster().getClusterManagerName(), equalTo(clusterManagerNode)); } private void ensureNoMaster(String node) throws Exception { @@ -162,11 +162,11 @@ private void testFollowerCheckerAfterClusterManagerReelection(NetworkLinkDisrupt ensureStableCluster(4); logger.info("--> stopping current cluster-manager"); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(3); - final String clusterManager = internalCluster().getMasterName(); + final String clusterManager = internalCluster().getClusterManagerName(); final List nonClusterManagers = Arrays.stream(internalCluster().getNodeNames()) .filter(n -> clusterManager.equals(n) == false) .collect(Collectors.toList()); @@ -205,7 +205,7 @@ public void testStaleClusterManagerNotHijackingMajority() throws Exception { ensureStableCluster(3); // Save the current cluster-manager node as old cluster-manager node, because that node will get frozen - final String oldClusterManagerNode = internalCluster().getMasterName(); + final String oldClusterManagerNode = internalCluster().getClusterManagerName(); // Simulating a painful gc by suspending all threads for a long time on the current elected cluster-manager node. SingleNodeDisruption clusterManagerNodeDisruption = new LongGCDisruption(random(), oldClusterManagerNode); @@ -274,7 +274,7 @@ public void onFailure(String source, Exception e) { }); // Save the new elected cluster-manager node - final String newClusterManagerNode = internalCluster().getMasterName(majoritySide.get(0)); + final String newClusterManagerNode = internalCluster().getClusterManagerName(majoritySide.get(0)); logger.info("--> new detected cluster-manager node [{}]", newClusterManagerNode); // Stop disruption diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java index c90c5f45af176..f9b2fb44e24be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java @@ -148,7 +148,7 @@ public void testPreflightCheckAvoidsClusterManager() throws InterruptedException final CountDownLatch clusterManagerBlockedLatch = new CountDownLatch(1); final CountDownLatch indexingCompletedLatch = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()) + internalCluster().getInstance(ClusterService.class, internalCluster().getClusterManagerName()) .submitStateUpdateTask("block-state-updates", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 82131b0260f49..bb1456c8b5d4f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -775,7 +775,7 @@ public 
void testSnapshotRecovery() throws Exception { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); - Repository repository = internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(REPO_NAME); + Repository repository = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class).repository(REPO_NAME); final RepositoryData repositoryData = PlainActionFuture.get(repository::getRepositoryData); for (Map.Entry> indexRecoveryStates : response.shardRecoveryStates().entrySet()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index b74dc2f4236a0..939743355f5f4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -80,7 +80,7 @@ public void testPromoteReplicaToPrimary() throws Exception { } // pick up a data node that contains a random primary shard - ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); + ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); final int numShards = state.metadata().index(indexName).getNumberOfShards(); final ShardRouting primaryShard = state.routingTable().index(indexName).shard(randomIntBetween(0, numShards - 1)).primaryShard(); final DiscoveryNode randomNode = state.nodes().resolveNode(primaryShard.currentNodeId()); @@ -89,7 +89,7 @@ public void testPromoteReplicaToPrimary() throws Exception { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(randomNode.getName())); ensureYellowAndNoInitializingShards(indexName); - state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); + state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); for (IndexShardRoutingTable shardRoutingTable : state.routingTable().index(indexName)) { for (ShardRouting shardRouting : shardRoutingTable.activeShards()) { assertThat(shardRouting + " should be promoted as a primary", shardRouting.primary(), is(true)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java index 6dab7781e08db..9970ff99a806c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateSettingsIT.java @@ -836,7 +836,7 @@ private void runTestDefaultNumberOfReplicasTest(final boolean closeIndex) { public void testNoopUpdate() { internalCluster().ensureAtLeastNumDataNodes(2); - final ClusterService clusterService = internalCluster().getMasterNodeInstance(ClusterService.class); + final ClusterService clusterService = internalCluster().getClusterManagerNodeInstance(ClusterService.class); assertAcked( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java index 3d70622e122c0..cdebe6c869be5 
100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -150,7 +150,10 @@ public void testCloseWhileRelocatingShards() throws Exception { ensureClusterSizeConsistency(); // wait for the cluster-manager to finish processing join. try { - final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + final ClusterService clusterService = internalCluster().getInstance( + ClusterService.class, + internalCluster().getClusterManagerName() + ); final ClusterState state = clusterService.state(); final CountDownLatch latch = new CountDownLatch(indices.length); final CountDownLatch release = new CountDownLatch(indices.length); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java index 38e50a64c8105..028baa0176367 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java @@ -150,7 +150,7 @@ private void createIndexWithDocs(final String indexName, final Collection creating index [test] with one shard and on replica"); assertAcked( prepareCreate("test").setSettings( @@ -133,7 +133,7 @@ public void testIndexCleanup() throws Exception { assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true)); logger.info("--> starting node server3"); - final String node_3 = internalCluster().startNode(nonMasterNode()); + final String node_3 = internalCluster().startNode(nonClusterManagerNode()); logger.info("--> running cluster_health"); ClusterHealthResponse clusterHealth = client().admin() .cluster() @@ -459,7 +459,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { public void testShardActiveElseWhere() throws Exception { List nodes = internalCluster().startNodes(2); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); final String nonClusterManagerNode = nodes.get(0).equals(clusterManagerNode) ? 
nodes.get(1) : nodes.get(0); final String clusterManagerId = internalCluster().clusterService(clusterManagerNode).localNode().getId(); diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java index d4ce36ff0575e..13d7e838b920a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java @@ -224,7 +224,7 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { public void testPersistentActionWithNonClusterStateCondition() throws Exception { PersistentTasksClusterService persistentTasksClusterService = internalCluster().getInstance( PersistentTasksClusterService.class, - internalCluster().getMasterName() + internalCluster().getClusterManagerName() ); // Speed up rechecks to a rate that is quicker than what settings would allow persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(1)); @@ -384,7 +384,7 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { public void testUnassignRunningPersistentTask() throws Exception { PersistentTasksClusterService persistentTasksClusterService = internalCluster().getInstance( PersistentTasksClusterService.class, - internalCluster().getMasterName() + internalCluster().getClusterManagerName() ); // Speed up rechecks to a rate that is quicker than what settings would allow persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java index 5d75482567731..aa1eeacbadd9d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -87,7 +87,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { } latch.await(); - ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); + ClusterService clusterService = internalCluster().clusterService(internalCluster().getClusterManagerName()); PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); assertEquals(numberOfTasks, tasks.tasks().stream().filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())).count()); diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index 78cf55e2f223c..84178f0255d81 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -65,7 +65,9 @@ public void testUpdateRepository() { final String repositoryName = "test-repo"; final Client client = client(); - final RepositoriesService repositoriesService = cluster.getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next(); + final RepositoriesService repositoriesService = cluster.getDataOrClusterManagerNodeInstances(RepositoriesService.class) + .iterator() + .next(); final 
Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 6450644314c08..f3954578bffc8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -56,7 +56,7 @@ public void testClusterManagerFailoverDuringCleanup() throws Exception { final int nodeCount = internalCluster().numDataAndMasterNodes(); logger.info("--> stopping cluster-manager node"); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(nodeCount - 1); @@ -92,7 +92,7 @@ public void testRepeatCleanupsDontRemove() throws Exception { private String startBlockedCleanup(String repoName) throws Exception { logger.info("--> starting two cluster-manager nodes and one data node"); - internalCluster().startMasterOnlyNodes(2); + internalCluster().startClusterManagerOnlyNodes(2); internalCluster().startDataOnlyNodes(1); createRepository(repoName, "mock"); @@ -100,7 +100,10 @@ private String startBlockedCleanup(String repoName) throws Exception { logger.info("--> snapshot"); client().admin().cluster().prepareCreateSnapshot(repoName, "test-snap").setWaitForCompletion(true).get(); - final RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + final RepositoriesService service = internalCluster().getInstance( + RepositoriesService.class, + internalCluster().getClusterManagerName() + ); final BlobStoreRepository repository = (BlobStoreRepository) service.repository(repoName); logger.info("--> creating a garbage data blob"); @@ -117,7 +120,7 @@ private String startBlockedCleanup(String repoName) throws Exception { ); garbageFuture.get(); - final String clusterManagerNode = blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + final String clusterManagerNode = blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); logger.info("--> starting repository cleanup"); client().admin().cluster().prepareCleanupRepository(repoName).execute(); @@ -146,7 +149,10 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); } - final RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + final RepositoriesService service = internalCluster().getInstance( + RepositoriesService.class, + internalCluster().getClusterManagerName() + ); final BlobStoreRepository repository = (BlobStoreRepository) service.repository(repoName); logger.info("--> write two outdated index-N blobs"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index a56f8667fab48..c6519cc3a0cb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -97,7 +97,7 @@ public void testScanScrollWithShardExceptions() 
throws Exception {
         assertThat(numHits, equalTo(100L));
         clearScroll("_all");

-        internalCluster().stopRandomNonMasterNode();
+        internalCluster().stopRandomNonClusterManagerNode();

         searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).setScroll(TimeValue.timeValueMinutes(1)).get();
         assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards()));
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
index d5f36608941d5..7b95bf93f8bf4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
@@ -91,7 +91,7 @@ public void testShardClone() throws Exception {
         final String sourceSnapshot = "source-snapshot";
         final SnapshotInfo sourceSnapshotInfo = createFullSnapshot(repoName, sourceSnapshot);

-        final BlobStoreRepository repository = (BlobStoreRepository) internalCluster().getCurrentMasterNodeInstance(
+        final BlobStoreRepository repository = (BlobStoreRepository) internalCluster().getCurrentClusterManagerNodeInstance(
             RepositoriesService.class
         ).repository(repoName);
         final RepositoryData repositoryData = getRepositoryData(repoName);
@@ -378,7 +378,7 @@ public void testBackToBackClonesForIndexNotInCluster() throws Exception {
     }

     public void testClusterManagerFailoverDuringCloneStep1() throws Exception {
-        internalCluster().startMasterOnlyNodes(3);
+        internalCluster().startClusterManagerOnlyNodes(3);
         internalCluster().startDataOnlyNode();
         final String repoName = "test-repo";
         createRepository(repoName, "mock");
@@ -392,7 +392,7 @@ public void testClusterManagerFailoverDuringCloneStep1() throws Exception {
         final String cloneName = "target-snapshot";
         final ActionFuture cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, cloneName, testIndex);
         awaitNumberOfSnapshotsInProgress(1);
-        final String clusterManagerNode = internalCluster().getMasterName();
+        final String clusterManagerNode = internalCluster().getClusterManagerName();
         waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L));
         internalCluster().restartNode(clusterManagerNode);
         boolean cloneSucceeded = false;
@@ -404,7 +404,7 @@ public void testClusterManagerFailoverDuringCloneStep1() throws Exception {
             // snapshot on disconnect slowly enough for it to work out
         }

-        awaitNoMoreRunningOperations(internalCluster().getMasterName());
+        awaitNoMoreRunningOperations(internalCluster().getClusterManagerName());

         // Check if the clone operation worked out by chance as a result of the clone request being retried
         // because of the cluster-manager failover
@@ -433,7 +433,7 @@ public void testFailsOnCloneMissingIndices() {

     public void testClusterManagerFailoverDuringCloneStep2() throws Exception {
         // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations
-        internalCluster().startMasterOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS);
+        internalCluster().startClusterManagerOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS);
         internalCluster().startDataOnlyNode();
         final String repoName = "test-repo";
         createRepository(repoName, "mock");
@@ -447,18 +447,18 @@ public void testClusterManagerFailoverDuringCloneStep2() throws Exception {
         blockClusterManagerOnShardClone(repoName);
         final ActionFuture cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, targetSnapshot, testIndex);
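        // A note on the choreography here, as the surrounding helpers suggest:
        // blockClusterManagerOnShardClone(repoName) arms a block inside the elected
        // cluster-manager's MockRepository (see the private helper further down in this
        // file, which calls setBlockOnWriteShardLevelMeta()), waitForBlock(...) then
        // waits until the in-flight clone actually trips that block, and only after
        // that is the node restarted, so the fail-over lands in the middle of the
        // shard-clone step rather than before it starts.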
awaitNumberOfSnapshotsInProgress(1); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); internalCluster().restartNode(clusterManagerNode); expectThrows(SnapshotException.class, cloneFuture::actionGet); - awaitNoMoreRunningOperations(internalCluster().getMasterName()); + awaitNoMoreRunningOperations(internalCluster().getClusterManagerName()); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 2); } public void testExceptionDuringShardClone() throws Exception { // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations - internalCluster().startMasterOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startClusterManagerOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -469,14 +469,14 @@ public void testExceptionDuringShardClone() throws Exception { createFullSnapshot(repoName, sourceSnapshot); final String targetSnapshot = "target-snapshot"; - blockMasterFromFinalizingSnapshotOnSnapFile(repoName); + blockClusterManagerFromFinalizingSnapshotOnSnapFile(repoName); final ActionFuture cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, targetSnapshot, testIndex); awaitNumberOfSnapshotsInProgress(1); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); unblockNode(repoName, clusterManagerNode); expectThrows(SnapshotException.class, cloneFuture::actionGet); - awaitNoMoreRunningOperations(internalCluster().getMasterName()); + awaitNoMoreRunningOperations(internalCluster().getClusterManagerName()); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); } @@ -491,7 +491,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final String sourceSnapshot = "source-snapshot"; blockDataNode(repoName, dataNode); - final Client clusterManagerClient = internalCluster().masterClient(); + final Client clusterManagerClient = internalCluster().clusterManagerClient(); final ActionFuture sourceSnapshotFuture = clusterManagerClient.admin() .cluster() .prepareCreateSnapshot(repoName, sourceSnapshot) @@ -528,7 +528,7 @@ public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throw final String sourceSnapshot = "source-snapshot"; createFullSnapshot(repoName, sourceSnapshot); - blockMasterOnWriteIndexFile(repoName); + blockClusterManagerOnWriteIndexFile(repoName); final String cloneName = "clone-blocked"; final ActionFuture blockedClone = startClone(repoName, sourceSnapshot, cloneName, indexName); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); @@ -558,7 +558,7 @@ public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws E final String sourceSnapshot = "source-snapshot"; createFullSnapshot(repoName, sourceSnapshot); - blockMasterOnWriteIndexFile(repoName); + blockClusterManagerOnWriteIndexFile(repoName); final String cloneName = "clone-blocked"; final ActionFuture blockedClone = startClone(repoName, sourceSnapshot, cloneName, indexName); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); @@ -588,7 
+588,7 @@ public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throw final String sourceSnapshot = "source-snapshot"; createFullSnapshot(repoName, sourceSnapshot); - blockMasterOnWriteIndexFile(repoName); + blockClusterManagerOnWriteIndexFile(repoName); final ActionFuture blockedSnapshot = startFullSnapshot(repoName, "snap-blocked"); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); awaitNumberOfSnapshotsInProgress(1); @@ -643,12 +643,12 @@ private static ActionFuture startClone( } private void blockClusterManagerOnReadIndexMeta(String repoName) { - ((MockRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName)) + ((MockRepository) internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class).repository(repoName)) .setBlockOnReadIndexMeta(); } private void blockClusterManagerOnShardClone(String repoName) { - ((MockRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName)) + ((MockRepository) internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class).repository(repoName)) .setBlockOnWriteShardLevelMeta(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index 04ec3f027f908..b6d482cad8860 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -224,7 +224,7 @@ public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception { .setWaitForCompletion(false) .get(); - unblockNode(blockedRepoName, internalCluster().getMasterName()); + unblockNode(blockedRepoName, internalCluster().getClusterManagerName()); expectThrows(SnapshotException.class, createSlowFuture::actionGet); assertBusy(() -> assertThat(currentSnapshots(otherRepoName), empty()), 30L, TimeUnit.SECONDS); @@ -312,7 +312,7 @@ public void testSnapshotRunsAfterInProgressDelete() throws Exception { final String firstSnapshot = "first-snapshot"; createFullSnapshot(repoName, firstSnapshot); - blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); final ActionFuture deleteFuture = startDeleteSnapshot(repoName, firstSnapshot); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); @@ -448,7 +448,7 @@ public void testCascadedAborts() throws Exception { } public void testClusterManagerFailOverWithQueuedDeletes() throws Exception { - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -513,7 +513,7 @@ public void testClusterManagerFailOverWithQueuedDeletes() throws Exception { }, 30L, TimeUnit.SECONDS); logger.info("--> stopping current cluster-manager node"); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); unblockNode(repoName, dataNode); unblockNode(repoName, dataNode2); @@ -591,7 +591,7 @@ public void testQueuedDeletesWithFailures() throws Exception { createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + 
blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); final ActionFuture firstDeleteFuture = startDeleteSnapshot(repoName, "*"); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); @@ -641,7 +641,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { } public void testQueuedOperationsOnClusterManagerRestart() throws Exception { - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -655,21 +655,21 @@ public void testQueuedOperationsOnClusterManagerRestart() throws Exception { startDeleteSnapshot(repoName, "*"); awaitNDeletionsInProgress(2); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(3); awaitNoMoreRunningOperations(); } public void testQueuedOperationsOnClusterManagerDisconnect() throws Exception { - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); @@ -707,18 +707,18 @@ public void testQueuedOperationsOnClusterManagerDisconnect() throws Exception { } public void testQueuedOperationsOnClusterManagerDisconnectAndRepoFailure() throws Exception { - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); - blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); final ActionFuture firstFailedSnapshotFuture = startFullSnapshotFromClusterManagerClient( repoName, "failing-snapshot-1" @@ -752,7 +752,7 @@ public void testQueuedOperationsOnClusterManagerDisconnectAndRepoFailure() throw public void testQueuedOperationsAndBrokenRepoOnClusterManagerFailOver() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); @@ -771,7 +771,7 @@ public void testQueuedOperationsAndBrokenRepoOnClusterManagerFailOver() throws E final ActionFuture deleteFuture = startDeleteFromNonClusterManagerClient(repoName, "*"); awaitNDeletionsInProgress(2); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(3); 
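A note on why three dedicated cluster-manager nodes appear throughout these tests: with a voting configuration of three, a quorum of two survives losing the elected node, so stopCurrentClusterManagerNode() forces a re-election instead of an outage, and ensureStableCluster(3) then counts the two surviving cluster-managers plus the data node. A minimal sketch of that recovery skeleton, using only calls from these hunks (the repository wiring in the middle is elided):

// Failover-recovery pattern shared by the queued-operations tests in this file.
internalCluster().startClusterManagerOnlyNodes(3);  // voting config of 3: quorum of 2 remains
internalCluster().startDataOnlyNode();
// ... queue snapshot/delete work behind a blocked "mock" repository ...
internalCluster().stopCurrentClusterManagerNode();  // one of the two remaining nodes is elected
ensureStableCluster(3);                             // 2 cluster-managers + 1 data node re-form
awaitNoMoreRunningOperations();                     // queued work drains on the new cluster-manager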
awaitNoMoreRunningOperations(); @@ -781,7 +781,7 @@ public void testQueuedOperationsAndBrokenRepoOnClusterManagerFailOver() throws E public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); @@ -790,7 +790,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver() createNSnapshots(repoName, randomIntBetween(2, 5)); final long generation = getRepositoryData(repoName).getGenId(); - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); blockNodeOnAnyFiles(repoName, clusterManagerNode); final ActionFuture snapshotThree = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-three"); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); @@ -798,7 +798,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver() corruptIndexN(repoPath, generation); final ActionFuture snapshotFour = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-four"); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(3); awaitNoMoreRunningOperations(); @@ -809,7 +809,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver() public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver2() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); @@ -818,8 +818,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver2() createNSnapshots(repoName, randomIntBetween(2, 5)); final long generation = getRepositoryData(repoName).getGenId(); - final String clusterManagerNode = internalCluster().getMasterName(); - blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + final String clusterManagerNode = internalCluster().getClusterManagerName(); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); final ActionFuture snapshotThree = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-three"); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); @@ -842,7 +842,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver2() public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOverMultipleRepos() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); - internalCluster().startMasterOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startClusterManagerOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); @@ -850,7 +850,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOverMul createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - final String clusterManagerNode = internalCluster().getMasterName(); + final 
String clusterManagerNode = internalCluster().getClusterManagerName(); final String blockedRepoName = "repo-blocked"; createRepository(blockedRepoName, "mock"); @@ -875,7 +875,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOverMul final ActionFuture snapshotFour = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-four"); awaitNumberOfSnapshotsInProgress(3); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(3); awaitNoMoreRunningOperations(); @@ -1014,7 +1014,7 @@ public void testBackToBackQueuedDeletes() throws Exception { } public void testQueuedOperationsAfterFinalizationFailure() throws Exception { - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -1024,7 +1024,7 @@ public void testQueuedOperationsAfterFinalizationFailure() throws Exception { final ActionFuture snapshotThree = startAndBlockFailingFullSnapshot(repoName, "snap-other"); - final String clusterManagerName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getClusterManagerName(); final String snapshotOne = snapshotNames.get(0); final ActionFuture deleteSnapshotOne = startDeleteSnapshot(repoName, snapshotOne); @@ -1044,7 +1044,7 @@ public void testStartDeleteDuringFinalizationCleanup() throws Exception { createIndexWithContent("index-test"); createNSnapshots(repoName, randomIntBetween(1, 5)); final String snapshotName = "snap-name"; - blockMasterFromDeletingIndexNFile(repoName); + blockClusterManagerFromDeletingIndexNFile(repoName); final ActionFuture snapshotFuture = startFullSnapshot(repoName, snapshotName); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); final ActionFuture deleteFuture = startDeleteSnapshot(repoName, snapshotName); @@ -1081,7 +1081,7 @@ public void testEquivalentDeletesAreDeduplicated() throws Exception { } public void testClusterManagerFailoverOnFinalizationLoop() throws Exception { - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -1090,8 +1090,8 @@ public void testClusterManagerFailoverOnFinalizationLoop() throws Exception { internalCluster().setDisruptionScheme(networkDisruption); final List snapshotNames = createNSnapshots(repoName, randomIntBetween(2, 5)); - final String clusterManagerName = internalCluster().getMasterName(); - blockMasterFromDeletingIndexNFile(repoName); + final String clusterManagerName = internalCluster().getClusterManagerName(); + blockClusterManagerFromDeletingIndexNFile(repoName); final ActionFuture snapshotThree = startFullSnapshotFromClusterManagerClient(repoName, "snap-other"); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); @@ -1196,7 +1196,7 @@ public void testInterleavedAcrossMultipleRepos() throws Exception { public void testClusterManagerFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); - internalCluster().startMasterOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startClusterManagerOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); final String dataNode = internalCluster().startDataOnlyNode(); final String 
repoName = "test-repo"; final String otherRepoName = "other-test-repo"; @@ -1210,8 +1210,8 @@ public void testClusterManagerFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos corruptIndexN(repoPath, getRepositoryData(repoName).getGenId()); - blockMasterFromFinalizingSnapshotOnIndexFile(repoName); - blockMasterFromFinalizingSnapshotOnIndexFile(otherRepoName); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(otherRepoName); client().admin().cluster().prepareCreateSnapshot(repoName, "snapshot-blocked-1").setWaitForCompletion(false).get(); client().admin().cluster().prepareCreateSnapshot(repoName, "snapshot-blocked-2").setWaitForCompletion(false).get(); @@ -1219,11 +1219,11 @@ public void testClusterManagerFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos client().admin().cluster().prepareCreateSnapshot(otherRepoName, "snapshot-other-blocked-2").setWaitForCompletion(false).get(); awaitNumberOfSnapshotsInProgress(4); - final String initialClusterManager = internalCluster().getMasterName(); + final String initialClusterManager = internalCluster().getClusterManagerName(); waitForBlock(initialClusterManager, repoName, TimeValue.timeValueSeconds(30L)); waitForBlock(initialClusterManager, otherRepoName, TimeValue.timeValueSeconds(30L)); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); ensureStableCluster(3, dataNode); awaitNoMoreRunningOperations(); @@ -1338,7 +1338,7 @@ public void testQueuedDeleteAfterFinalizationFailure() throws Exception { final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); - blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); final String snapshotName = "snap-1"; final ActionFuture snapshotFuture = startFullSnapshot(repoName, snapshotName); waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); @@ -1386,7 +1386,7 @@ public void testStartWithSuccessfulShardSnapshotPendingFinalization() throws Exc createIndexWithContent("test-idx"); createFullSnapshot(repoName, "first-snapshot"); - blockMasterOnWriteIndexFile(repoName); + blockClusterManagerOnWriteIndexFile(repoName); final ActionFuture blockedSnapshot = startFullSnapshot(repoName, "snap-blocked"); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); awaitNumberOfSnapshotsInProgress(1); @@ -1431,12 +1431,12 @@ private List createNSnapshots(String repoName, int count) { private ActionFuture startDeleteFromNonClusterManagerClient(String repoName, String snapshotName) { logger.info("--> deleting snapshot [{}] from repo [{}] from non cluster-manager client", snapshotName, repoName); - return internalCluster().nonMasterClient().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(); + return internalCluster().nonClusterManagerClient().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(); } private ActionFuture startFullSnapshotFromNonClusterManagerClient(String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] to repo [{}] from non cluster-manager client", snapshotName, repoName); - return internalCluster().nonMasterClient() + return internalCluster().nonClusterManagerClient() .admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) @@ -1446,7 +1446,7 @@ private ActionFuture startFullSnapshotFromNonClusterMana private 
ActionFuture startFullSnapshotFromClusterManagerClient(String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] to repo [{}] from cluster-manager client", snapshotName, repoName); - return internalCluster().masterClient() + return internalCluster().clusterManagerClient() .admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) @@ -1501,7 +1501,7 @@ private static List currentSnapshots(String repoName) { private ActionFuture startAndBlockOnDeleteSnapshot(String repoName, String snapshotName) throws InterruptedException { - final String clusterManagerName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getClusterManagerName(); blockNodeOnAnyFiles(repoName, clusterManagerName); final ActionFuture fut = startDeleteSnapshot(repoName, snapshotName); waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); @@ -1510,9 +1510,9 @@ private ActionFuture startAndBlockOnDeleteSnapshot(String private ActionFuture startAndBlockFailingFullSnapshot(String blockedRepoName, String snapshotName) throws InterruptedException { - blockMasterFromFinalizingSnapshotOnIndexFile(blockedRepoName); + blockClusterManagerFromFinalizingSnapshotOnIndexFile(blockedRepoName); final ActionFuture fut = startFullSnapshot(blockedRepoName, snapshotName); - waitForBlock(internalCluster().getMasterName(), blockedRepoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(internalCluster().getClusterManagerName(), blockedRepoName, TimeValue.timeValueSeconds(30L)); return fut; } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index c253f1a4f876e..b806ee3e55a94 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -205,7 +205,7 @@ public void testConcurrentlyChangeRepositoryContentsInBwCMode() throws Exception equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) ); - final Repository repository = internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repoName); + final Repository repository = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class).repository(repoName); logger.info("--> move index-N blob to next generation"); final RepositoryData repositoryData = getRepositoryData(repository); @@ -215,7 +215,7 @@ public void testConcurrentlyChangeRepositoryContentsInBwCMode() throws Exception logger.info("--> verify index-N blob is found at the new location"); assertThat(getRepositoryData(repository).getGenId(), is(beforeMoveGen + 1)); - final SnapshotsService snapshotsService = internalCluster().getCurrentMasterNodeInstance(SnapshotsService.class); + final SnapshotsService snapshotsService = internalCluster().getCurrentClusterManagerNodeInstance(SnapshotsService.class); logger.info("--> wait for all listeners on snapshots service to be resolved to avoid snapshot task batching causing a conflict"); assertBusy(() -> assertTrue(snapshotsService.assertAllListenersResolved())); @@ -267,7 +267,8 @@ public void testFindDanglingLatestGeneration() throws Exception { equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) ); - final Repository repository = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName); + final Repository 
repository = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class) + .repository(repoName); logger.info("--> move index-N blob to next generation"); final RepositoryData repositoryData = getRepositoryData(repoName); @@ -367,8 +368,8 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { ); logger.info("--> verify that repo is assumed in old metadata format"); - final SnapshotsService snapshotsService = internalCluster().getCurrentMasterNodeInstance(SnapshotsService.class); - final ThreadPool threadPool = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class); + final SnapshotsService snapshotsService = internalCluster().getCurrentClusterManagerNodeInstance(SnapshotsService.class); + final ThreadPool threadPool = internalCluster().getCurrentClusterManagerNodeInstance(ThreadPool.class); assertThat( PlainActionFuture.get( f -> threadPool.generic() @@ -435,7 +436,8 @@ public void testMountCorruptedRepositoryData() throws Exception { ); logger.info("--> corrupt index-N blob"); - final Repository repository = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName); + final Repository repository = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class) + .repository(repoName); final RepositoryData repositoryData = getRepositoryData(repoName); Files.write(repo.resolve("index-" + repositoryData.getGenId()), randomByteArrayOfLength(randomIntBetween(1, 100))); @@ -444,7 +446,8 @@ public void testMountCorruptedRepositoryData() throws Exception { final String otherRepoName = "other-repo"; createRepository(otherRepoName, "fs", Settings.builder().put("location", repo).put("compress", false)); - final Repository otherRepo = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(otherRepoName); + final Repository otherRepo = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class) + .repository(otherRepoName); logger.info("--> verify loading repository data from newly mounted repository throws RepositoryException"); expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 2eca8555e1388..7bb63fad7ad24 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -124,7 +124,7 @@ import java.util.function.Consumer; import static org.opensearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFutureThrows; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; @@ -760,7 +760,7 @@ public void testRegistrationFailure() { internalCluster().startNode(); logger.info("--> start second node"); // Make sure the first node is elected as cluster-manager - internalCluster().startNode(nonMasterNode()); + internalCluster().startNode(nonClusterManagerNode()); // Register mock repositories for (int i = 0; i < 5; i++) { 
clusterAdmin().preparePutRepository("test-repo" + i) @@ -837,7 +837,7 @@ public void sendResponse(RestResponse response) { public void testClusterManagerShutdownDuringSnapshot() throws Exception { logger.info("--> starting two cluster-manager nodes and two data nodes"); - internalCluster().startMasterOnlyNodes(2); + internalCluster().startClusterManagerOnlyNodes(2); internalCluster().startDataOnlyNodes(2); final Path repoPath = randomRepoPath(); @@ -860,7 +860,7 @@ public void testClusterManagerShutdownDuringSnapshot() throws Exception { .get(); logger.info("--> stopping cluster-manager node"); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); logger.info("--> wait until the snapshot is done"); @@ -875,7 +875,7 @@ public void testClusterManagerShutdownDuringSnapshot() throws Exception { public void testClusterManagerAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> starting three cluster-manager nodes and two data nodes"); - internalCluster().startMasterOnlyNodes(3); + internalCluster().startClusterManagerOnlyNodes(3); internalCluster().startDataOnlyNodes(2); final Path repoPath = randomRepoPath(); @@ -890,7 +890,7 @@ public void testClusterManagerAndDataShutdownDuringSnapshot() throws Exception { final int numberOfShards = getNumShards("test-idx").numPrimaries; logger.info("number of shards: {}", numberOfShards); - final String clusterManagerNode = blockMasterFromFinalizingSnapshotOnSnapFile("test-repo"); + final String clusterManagerNode = blockClusterManagerFromFinalizingSnapshotOnSnapFile("test-repo"); final String dataNode = blockNodeWithIndex("test-repo", "test-idx"); dataNodeClient().admin() @@ -903,7 +903,7 @@ public void testClusterManagerAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> stopping data node {}", dataNode); stopNode(dataNode); logger.info("--> stopping cluster-manager node {} ", clusterManagerNode); - internalCluster().stopCurrentMasterNode(); + internalCluster().stopCurrentClusterManagerNode(); logger.info("--> wait until the snapshot is done"); @@ -1159,7 +1159,7 @@ public void testDataNodeRestartWithBusyClusterManagerDuringSnapshot() throws Exc logger.info("--> snapshot"); ServiceDisruptionScheme disruption = new BusyMasterServiceDisruption(random(), Priority.HIGH); setDisruptionScheme(disruption); - client(internalCluster().getMasterName()).admin() + client(internalCluster().getClusterManagerName()).admin() .cluster() .prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(false) @@ -1213,7 +1213,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { blockAllDataNodes("test-repo"); logger.info("--> snapshot"); - client(internalCluster().getMasterName()).admin() + client(internalCluster().getClusterManagerName()).admin() .cluster() .prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(false) @@ -1399,7 +1399,7 @@ public void testCreateSnapshotLegacyPath() throws Exception { createRepository(repoName, "fs"); createIndex("some-index"); - final SnapshotsService snapshotsService = internalCluster().getMasterNodeInstance(SnapshotsService.class); + final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); final Snapshot snapshot1 = PlainActionFuture.get( f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index 0750675d46b9c..5e44355c8424c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -198,7 +198,7 @@ private void assertIndexMetadataLoads(final String snapshot, final String index, } private CountingMockRepository getCountingMockRepository() { - String clusterManager = internalCluster().getMasterName(); + String clusterManager = internalCluster().getClusterManagerName(); RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, clusterManager); Repository repository = repositoriesService.repository("repository"); assertThat(repository, instanceOf(CountingMockRepository.class)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index e72110f4c4efd..197a8a10f3a20 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -157,7 +157,7 @@ public void testResidualStaleIndicesAreDeletedByConsecutiveDelete() throws Excep createFullSnapshot(repositoryName, snapshotToBeDeletedLastName); // Create more snapshots to be deleted in bulk - int maxThreadsForSnapshotDeletion = internalCluster().getMasterNodeInstance(ThreadPool.class) + int maxThreadsForSnapshotDeletion = internalCluster().getClusterManagerNodeInstance(ThreadPool.class) .info(ThreadPool.Names.SNAPSHOT) .getMax(); for (int i = 1; i <= maxThreadsForSnapshotDeletion + 1; i++) { @@ -177,7 +177,7 @@ public void testResidualStaleIndicesAreDeletedByConsecutiveDelete() throws Excep // Make repository to throw exception when trying to delete stale indices // This will make sure stale indices stays in repository after snapshot delete - String clusterManagerNode = internalCluster().getMasterName(); + String clusterManagerNode = internalCluster().getClusterManagerName(); ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerNode).repository("test-repo")) .setThrowExceptionWhileDelete(true); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java index d84eb9ea1e269..979e6443a9b4c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -70,7 +70,7 @@ protected Collection> nodePlugins() { } public void testFilteredRepoMetadataIsUsed() { - final String clusterManagerName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getClusterManagerName(); final String repoName = "test-repo"; assertAcked( client().admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index fa04bfbf4e959..ee9e162a34f5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1916,7 +1916,7 @@ public void testSnapshotCanceledOnRemovedShard() throws Exception { waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10)); logger.info("--> removing primary shard that is being snapshotted"); - ClusterState clusterState = internalCluster().clusterService(internalCluster().getMasterName()).state(); + ClusterState clusterState = internalCluster().clusterService(internalCluster().getClusterManagerName()).state(); IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index); String nodeWithPrimary = clusterState.nodes().get(indexRoutingTable.shard(0).primaryShard().currentNodeId()).getName(); assertNotNull("should be at least one node with a primary shard", nodeWithPrimary); @@ -2368,7 +2368,7 @@ public void testIndexLatestFailuresIgnored() throws Exception { final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); createRepository(repoName, "mock", repoPath); - final MockRepository repository = (MockRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class) + final MockRepository repository = (MockRepository) internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class) .repository(repoName); repository.setFailOnIndexLatest(true); createFullSnapshot(repoName, "snapshot-1"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 1376961825e8b..b6fa4fbc2fc96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -238,7 +238,7 @@ public void testCorrectCountsForDoneShards() throws Exception { final String snapshotOne = "snap-1"; // restarting a data node below so using a cluster-manager client here - final ActionFuture responseSnapshotOne = internalCluster().masterClient() + final ActionFuture responseSnapshotOne = internalCluster().clusterManagerClient() .admin() .cluster() .prepareCreateSnapshot(repoName, snapshotOne) diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java index e4fd8064098dd..fb3a628827462 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java @@ -56,7 +56,7 @@ import static org.opensearch.cluster.coordination.ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING; import static org.opensearch.common.settings.Settings.builder; import static org.opensearch.node.Node.NODE_NAME_SETTING; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -298,7 +298,7 @@ public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { final Settings.Builder settings = Settings.builder() .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) 
.put(NODE_NAME_SETTING.getKey(), localNode.getName()) - .put(nonMasterNode()); + .put(nonClusterManagerNode()); assertThat( expectThrows( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index b2b7c167ec7c7..9f9ccf34a6a9d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -63,7 +63,7 @@ import static org.opensearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.opensearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.opensearch.node.Node.NODE_NAME_SETTING; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -680,7 +680,7 @@ public void testFailBootstrapNonClusterManagerEligibleNodeWithSingleNodeDiscover final Settings.Builder settings = Settings.builder() .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) .put(NODE_NAME_SETTING.getKey(), localNode.getName()) - .put(nonMasterNode()); + .put(nonClusterManagerNode()); assertThat( expectThrows( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index 81fbaf5fce8c0..9f9de698296b8 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -90,7 +90,7 @@ import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -1702,7 +1702,7 @@ public void testReconfiguresToExcludeClusterManagerIneligibleNodesInVotingConfig chosenNode.close(); cluster.clusterNodes.replaceAll( - cn -> cn == chosenNode ? cn.restartedNode(Function.identity(), Function.identity(), nonMasterNode()) : cn + cn -> cn == chosenNode ? 
cn.restartedNode(Function.identity(), Function.identity(), nonClusterManagerNode()) : cn ); cluster.stabilise(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java b/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java index aa2be1fb652cd..a30581e2576e2 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java @@ -56,7 +56,7 @@ protected void createAndIndex(String index, int replicaCount, int shardCount) { * enabled, cluster should not become red and zone2 nodes have all the primaries */ public void testClusterGreenAfterPartialRelocation() throws InterruptedException { - internalCluster().startMasterOnlyNodes(1); + internalCluster().startClusterManagerOnlyNodes(1); final String z1 = "zone-1", z2 = "zone-2"; final int primaryShardCount = 6; assertTrue("Primary shard count must be even for equal distribution across two nodes", primaryShardCount % 2 == 0); diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index f9e1b8e30af41..d886922d56882 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -66,7 +66,7 @@ import java.util.concurrent.atomic.AtomicReference; import static org.opensearch.test.NodeRoles.nonDataNode; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; @@ -473,7 +473,7 @@ public void testCustomDataPaths() throws Exception { public void testNodeIdNotPersistedAtInitialization() throws IOException { NodeEnvironment env = newNodeEnvironment( new String[0], - nonMasterNode(nonDataNode(Settings.builder().put("node.local_storage", false).build())) + nonClusterManagerNode(nonDataNode(Settings.builder().put("node.local_storage", false).build())) ); String nodeID = env.nodeId(); env.close(); @@ -566,7 +566,7 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath); // build settings using same path.data as original but without cluster-manager role - Settings noClusterManagerSettings = nonMasterNode(settings); + Settings noClusterManagerSettings = nonClusterManagerNode(settings); // test that we can create cluster_manager=false env regardless of data. 
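The NodeRoles helpers recurring in these hunks compose over Settings: each overload returns a copy of the given settings (or of the defaults, for the no-argument forms) with one role added or removed, so role combinations nest. A sketch of the combinations these tests build, assuming the overloads imported above from org.opensearch.test.NodeRoles and org.opensearch.common.settings.Settings:

Settings dataOnly = nonClusterManagerNode();                                         // defaults minus the cluster-manager role
Settings coordinatingOnly = nonClusterManagerNode(nonDataNode(Settings.EMPTY));      // neither cluster-manager nor data
Settings dedicatedClusterManager = clusterManagerNode(nonDataNode(Settings.EMPTY));  // cluster-manager role only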
newNodeEnvironment(noClusterManagerSettings).close(); diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index ffcbb3eed91f7..009ff324809b4 100644 --- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -64,9 +64,9 @@ import static org.opensearch.env.NodeRepurposeCommand.NO_CLEANUP; import static org.opensearch.env.NodeRepurposeCommand.NO_DATA_TO_CLEAN_UP_FOUND; import static org.opensearch.env.NodeRepurposeCommand.NO_SHARD_DATA_TO_CLEAN_UP_FOUND; -import static org.opensearch.test.NodeRoles.masterNode; +import static org.opensearch.test.NodeRoles.clusterManagerNode; import static org.opensearch.test.NodeRoles.nonDataNode; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.NodeRoles.removeRoles; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -102,13 +102,13 @@ public void createNodePaths() throws IOException { writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE); } } - dataNoClusterManagerSettings = nonMasterNode(dataClusterManagerSettings); + dataNoClusterManagerSettings = nonClusterManagerNode(dataClusterManagerSettings); noDataNoClusterManagerSettings = removeRoles( dataClusterManagerSettings, Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) ); - noDataClusterManagerSettings = masterNode(nonDataNode(dataClusterManagerSettings)); + noDataClusterManagerSettings = clusterManagerNode(nonDataNode(dataClusterManagerSettings)); } public void testEarlyExitNoCleanup() throws Exception { diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 339b04f45c830..cc9a7a70b99f4 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -75,7 +75,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -405,7 +405,7 @@ public void testDataOnlyNodePersistence() throws Exception { ); Settings settings = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName.value()) - .put(nonMasterNode()) + .put(nonClusterManagerNode()) .put(Node.NODE_NAME_SETTING.getKey(), "test") .build(); final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays); diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index ba0268b627b95..2d8a26f8bbe87 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -61,7 +61,7 @@ import java.util.HashSet; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -import static org.opensearch.test.NodeRoles.masterNode; 
+import static org.opensearch.test.NodeRoles.clusterManagerNode; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.hasItem; @@ -130,7 +130,7 @@ public void testRecoverStateUpdateTask() throws Exception { ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(); String nodeId = randomAlphaOfLength(10); DiscoveryNode clusterManagerNode = DiscoveryNode.createLocal( - settings(Version.CURRENT).put(masterNode()).build(), + settings(Version.CURRENT).put(clusterManagerNode()).build(), new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId ); diff --git a/server/src/test/java/org/opensearch/transport/ConnectionProfileTests.java b/server/src/test/java/org/opensearch/transport/ConnectionProfileTests.java index d6dc56204f5da..d3a20e9b68e34 100644 --- a/server/src/test/java/org/opensearch/transport/ConnectionProfileTests.java +++ b/server/src/test/java/org/opensearch/transport/ConnectionProfileTests.java @@ -44,8 +44,8 @@ import java.util.HashSet; import java.util.List; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.NodeRoles.nonDataNode; -import static org.opensearch.test.NodeRoles.nonMasterNode; import static org.opensearch.test.NodeRoles.removeRoles; import static org.hamcrest.Matchers.equalTo; @@ -240,7 +240,7 @@ public void testDefaultConnectionProfile() { assertEquals(TransportSettings.TRANSPORT_COMPRESS.get(Settings.EMPTY), profile.getCompressionEnabled()); assertEquals(TransportSettings.PING_SCHEDULE.get(Settings.EMPTY), profile.getPingInterval()); - profile = ConnectionProfile.buildDefaultConnectionProfile(nonMasterNode()); + profile = ConnectionProfile.buildDefaultConnectionProfile(nonClusterManagerNode()); assertEquals(12, profile.getNumConnections()); assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java index 9bb8b79377939..b8dd17da45acb 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java @@ -63,7 +63,7 @@ import java.util.function.BiFunction; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; -import static org.opensearch.test.NodeRoles.nonMasterNode; +import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.NodeRoles.removeRoles; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -552,7 +552,7 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { public void testRemoteNodeRoles() throws IOException, InterruptedException { final Settings settings = Settings.EMPTY; final List knownNodes = new CopyOnWriteArrayList<>(); - final Settings data = nonMasterNode(); + final Settings data = nonClusterManagerNode(); final Settings dedicatedClusterManager = clusterManagerOnlyNode(); try ( MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager); diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index 50158c6ecf053..f9bc0d1066d8e 100644 --- 
a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -105,7 +105,7 @@ public final class BlobStoreTestUtil { public static void assertRepoConsistency(InternalTestCluster testCluster, String repoName) { - final BlobStoreRepository repo = (BlobStoreRepository) testCluster.getCurrentMasterNodeInstance(RepositoriesService.class) + final BlobStoreRepository repo = (BlobStoreRepository) testCluster.getCurrentClusterManagerNodeInstance(RepositoriesService.class) .repository(repoName); BlobStoreTestUtil.assertConsistency(repo, repo.threadPool().executor(ThreadPool.Names.GENERIC)); } diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java index 7d9810a11e143..f2b713852584b 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java @@ -107,7 +107,7 @@ protected final String createRepository(final String name, final Settings settin client().admin().cluster().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings) ); - internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { + internalCluster().getDataOrClusterManagerNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); assertThat(repositories.repository(name), instanceOf(BlobStoreRepository.class)); assertThat(repositories.repository(name).isReadOnly(), is(false)); @@ -280,7 +280,7 @@ protected static void writeBlob(BlobContainer container, String blobName, BytesA protected BlobStore newBlobStore() { final String repository = createRepository(randomName()); - final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getMasterNodeInstance( + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getClusterManagerNodeInstance( RepositoriesService.class ).repository(repository); return PlainActionFuture.get( @@ -470,8 +470,11 @@ public void testIndicesDeletedFromRepository() throws Exception { assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, "test-snap").get()); logger.info("--> verify index folder deleted from blob container"); - RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); - ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); + RepositoriesService repositoriesSvc = internalCluster().getInstance( + RepositoriesService.class, + internalCluster().getClusterManagerName() + ); + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getClusterManagerName()); BlobStoreRepository repository = (BlobStoreRepository) repositoriesSvc.repository(repoName); final SetOnce indicesBlobContainer = new SetOnce<>(); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 3594bf9f53ca4..bbac1ef591418 100644 --- 
a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -166,7 +166,7 @@ protected void disableRepoConsistencyCheck(String reason) { } protected RepositoryData getRepositoryData(String repository) { - return getRepositoryData(internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repository)); + return getRepositoryData(internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class).repository(repository)); } protected RepositoryData getRepositoryData(Repository repository) { @@ -175,7 +175,7 @@ protected RepositoryData getRepositoryData(Repository repository) { public static long getFailureCount(String repository) { long failureCount = 0; - for (RepositoriesService repositoriesService : internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) { + for (RepositoriesService repositoriesService : internalCluster().getDataOrClusterManagerNodeInstances(RepositoriesService.class)) { MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository); failureCount += mockRepository.getFailureCount(); } @@ -251,33 +251,57 @@ public SnapshotInfo waitForCompletion(String repository, String snapshotName, Ti return null; } - public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) { - final String clusterManagerName = internalCluster().getMasterName(); + public static String blockClusterManagerFromFinalizingSnapshotOnIndexFile(final String repositoryName) { + final String clusterManagerName = internalCluster().getClusterManagerName(); ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName)) .setBlockAndFailOnWriteIndexFile(); return clusterManagerName; } - public static String blockMasterOnWriteIndexFile(final String repositoryName) { - final String clusterManagerName = internalCluster().getMasterName(); - ((MockRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repositoryName)) + public static String blockClusterManagerOnWriteIndexFile(final String repositoryName) { + final String clusterManagerName = internalCluster().getClusterManagerName(); + ((MockRepository) internalCluster().getClusterManagerNodeInstance(RepositoriesService.class).repository(repositoryName)) .setBlockOnWriteIndexFile(); return clusterManagerName; } - public static void blockMasterFromDeletingIndexNFile(String repositoryName) { - final String clusterManagerName = internalCluster().getMasterName(); + public static void blockClusterManagerFromDeletingIndexNFile(String repositoryName) { + final String clusterManagerName = internalCluster().getClusterManagerName(); ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName)) .setBlockOnDeleteIndexFile(); } - public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) { - final String clusterManagerName = internalCluster().getMasterName(); + public static String blockClusterManagerFromFinalizingSnapshotOnSnapFile(final String repositoryName) { + final String clusterManagerName = internalCluster().getClusterManagerName(); ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerName).repository(repositoryName)) .setBlockAndFailOnWriteSnapFiles(true); return clusterManagerName; } + /** @deprecated As 
of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerFromFinalizingSnapshotOnIndexFile(String)} */ + @Deprecated + public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) { + return blockClusterManagerFromFinalizingSnapshotOnIndexFile(repositoryName); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerOnWriteIndexFile(String)} */ + @Deprecated + public static String blockMasterOnWriteIndexFile(final String repositoryName) { + return blockClusterManagerOnWriteIndexFile(repositoryName); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerFromDeletingIndexNFile(String)} */ + @Deprecated + public static void blockMasterFromDeletingIndexNFile(String repositoryName) { + blockClusterManagerFromDeletingIndexNFile(repositoryName); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerFromFinalizingSnapshotOnSnapFile(String)} */ + @Deprecated + public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) { + return blockClusterManagerFromFinalizingSnapshotOnSnapFile(repositoryName); + } + public static String blockNodeWithIndex(final String repositoryName, final String indexName) { for (String node : internalCluster().nodesInclude(indexName)) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, node).repository(repositoryName)).blockOnDataFiles( @@ -479,7 +503,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map adding old version FAILED snapshot [{}] to repository [{}]", snapshotId, repoName); final SnapshotInfo snapshotInfo = new SnapshotInfo( @@ -511,7 +535,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map statePredicate) throws Exception { - awaitClusterState(internalCluster().getMasterName(), statePredicate); + awaitClusterState(internalCluster().getClusterManagerName(), statePredicate); } protected void awaitClusterState(String viaNode, Predicate statePredicate) throws Exception { @@ -601,7 +625,7 @@ protected ActionFuture startDeleteSnapshot(String repoName protected void updateClusterState(final Function updater) throws Exception { final PlainActionFuture future = PlainActionFuture.newFuture(); - final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final ClusterService clusterService = internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class); clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -629,7 +653,7 @@ protected SnapshotInfo getSnapshot(String repository, String snapshot) { protected void awaitClusterManagerFinishRepoOperations() throws Exception { logger.info("--> waiting for cluster-manager to finish all repo operations on its SNAPSHOT pool"); - final ThreadPool clusterManagerThreadPool = internalCluster().getMasterNodeInstance(ThreadPool.class); + final ThreadPool clusterManagerThreadPool = internalCluster().getClusterManagerNodeInstance(ThreadPool.class); assertBusy(() -> { for (ThreadPoolStats.Stats stat : clusterManagerThreadPool.stats()) { if (ThreadPool.Names.SNAPSHOT.equals(stat.getName())) { @@ -639,4 +663,10 @@ protected void awaitClusterManagerFinishRepoOperations() throws Exception { } }); } + + /** @deprecated As of 2.2, because 
supporting inclusive language, replaced by {@link #awaitClusterManagerFinishRepoOperations()} */ + @Deprecated + protected void awaitMasterFinishRepoOperations() throws Exception { + awaitClusterManagerFinishRepoOperations(); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 7124cc6a180ac..c68e1eac3e1d8 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -207,8 +207,15 @@ public final class InternalTestCluster extends TestCluster { nodeAndClient.node.settings() ); - public static final int DEFAULT_LOW_NUM_MASTER_NODES = 1; - public static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3; + public static final int DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES = 1; + public static final int DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES = 3; + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES} */ + @Deprecated + public static final int DEFAULT_LOW_NUM_MASTER_NODES = DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES; + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES} */ + @Deprecated + public static final int DEFAULT_HIGH_NUM_MASTER_NODES = DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES; static final int DEFAULT_MIN_NUM_DATA_NODES = 1; static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3; @@ -341,9 +348,9 @@ public InternalTestCluster( if (useDedicatedClusterManagerNodes) { if (random.nextBoolean()) { // use a dedicated cluster-manager, but only low number to reduce overhead to tests - this.numSharedDedicatedClusterManagerNodes = DEFAULT_LOW_NUM_MASTER_NODES; + this.numSharedDedicatedClusterManagerNodes = DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES; } else { - this.numSharedDedicatedClusterManagerNodes = DEFAULT_HIGH_NUM_MASTER_NODES; + this.numSharedDedicatedClusterManagerNodes = DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES; } } else { this.numSharedDedicatedClusterManagerNodes = 0; @@ -670,7 +677,7 @@ public synchronized void ensureAtMostNumDataNodes(int n) throws IOException { // prevent killing the cluster-manager if possible and client nodes final Stream collection = n == 0 ? nodes.values().stream() - : nodes.values().stream().filter(DATA_NODE_PREDICATE.and(new NodeNamePredicate(getMasterName()).negate())); + : nodes.values().stream().filter(DATA_NODE_PREDICATE.and(new NodeNamePredicate(getClusterManagerName()).negate())); final Iterator values = collection.iterator(); logger.info("changing cluster size from {} data nodes to {}", size, n); @@ -825,8 +832,8 @@ public Client dataNodeClient() { * Returns a node client to the current cluster-manager node. * Note: use this with care tests should not rely on a certain nodes client. */ - public Client masterClient() { - NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName())); + public Client clusterManagerClient() { + NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName())); if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); // ensure node client cluster-manager is requested } @@ -836,14 +843,33 @@ public Client masterClient() { /** * Returns a node client to random node but not the cluster-manager. This method will fail if no non-cluster-manager client is available. 
*/ - public Client nonMasterClient() { - NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate()); + public Client nonClusterManagerClient() { + NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName()).negate()); if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); // ensure node client non-cluster-manager is requested } throw new AssertionError("No non-cluster-manager client found"); } + /** + * Returns a node client to the current cluster-manager node. + * Note: use this with care tests should not rely on a certain nodes client. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerClient()} + */ + @Deprecated + public Client masterClient() { + return clusterManagerClient(); + } + + /** + * Returns a node client to random node but not the cluster-manager. This method will fail if no non-cluster-manager client is available. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #nonClusterManagerClient()} + */ + @Deprecated + public Client nonMasterClient() { + return nonClusterManagerClient(); + } + /** * Returns a client to a coordinating only node */ @@ -902,7 +928,10 @@ public synchronized void close() throws IOException { } } - public static final int REMOVED_MINIMUM_MASTER_NODES = Integer.MAX_VALUE; + public static final int REMOVED_MINIMUM_CLUSTER_MANAGER_NODES = Integer.MAX_VALUE; + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #REMOVED_MINIMUM_CLUSTER_MANAGER_NODES} */ + @Deprecated + public static final int REMOVED_MINIMUM_MASTER_NODES = REMOVED_MINIMUM_CLUSTER_MANAGER_NODES; private final class NodeAndClient implements Closeable { private MockNode node; @@ -935,10 +964,16 @@ public String getName() { return name; } - public boolean isMasterEligible() { + public boolean isClusterManagerEligible() { return DiscoveryNode.isClusterManagerNode(node.settings()); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerEligible()} */ + @Deprecated + public boolean isMasterEligible() { + return isClusterManagerEligible(); + } + Client client() { return getOrBuildNodeClient(); } @@ -1127,7 +1162,7 @@ private synchronized void reset(boolean wipeData) throws IOException { assertTrue( "expected at least one cluster-manager-eligible node left in " + nodes, - nodes.isEmpty() || nodes.values().stream().anyMatch(NodeAndClient::isMasterEligible) + nodes.isEmpty() || nodes.values().stream().anyMatch(NodeAndClient::isClusterManagerEligible) ); final int prevNodeCount = nodes.size(); @@ -1560,16 +1595,33 @@ public Iterable getDataNodeInstances(Class clazz) { return getInstances(clazz, DATA_NODE_PREDICATE); } + public synchronized T getCurrentClusterManagerNodeInstance(Class clazz) { + return getInstance(clazz, new NodeNamePredicate(getClusterManagerName())); + } + + /** + * Returns an Iterable to all instances for the given class >T< across all data and cluster-manager nodes + * in the cluster. 
+ */ + public Iterable getDataOrClusterManagerNodeInstances(Class clazz) { + return getInstances(clazz, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE)); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getCurrentClusterManagerNodeInstance(Class)} */ + @Deprecated public synchronized T getCurrentMasterNodeInstance(Class clazz) { - return getInstance(clazz, new NodeNamePredicate(getMasterName())); + return getCurrentClusterManagerNodeInstance(clazz); } /** * Returns an Iterable to all instances for the given class >T< across all data and cluster-manager nodes * in the cluster. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getDataOrClusterManagerNodeInstances(Class)} */ + @Deprecated public Iterable getDataOrMasterNodeInstances(Class clazz) { - return getInstances(clazz, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE)); + return getDataOrClusterManagerNodeInstances(clazz); } private Iterable getInstances(Class clazz, Predicate predicate) { @@ -1592,10 +1644,16 @@ public T getDataNodeInstance(Class clazz) { return getInstance(clazz, DATA_NODE_PREDICATE); } - public T getMasterNodeInstance(Class clazz) { + public T getClusterManagerNodeInstance(Class clazz) { return getInstance(clazz, CLUSTER_MANAGER_NODE_PREDICATE); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeInstance(Class)} */ + @Deprecated + public T getMasterNodeInstance(Class clazz) { + return getClusterManagerNodeInstance(clazz); + } + private synchronized T getInstance(Class clazz, Predicate predicate) { NodeAndClient randomNodeAndClient = getRandomNodeAndClient(predicate); assert randomNodeAndClient != null; @@ -1662,11 +1720,11 @@ public synchronized void stopRandomNode(final Predicate filter) throws if (nodeAndClient != null) { if (nodePrefix.equals(OpenSearchIntegTestCase.SUITE_CLUSTER_NODE_PREFIX) && nodeAndClient.nodeAndClientId() < sharedNodesSeeds.length - && nodeAndClient.isMasterEligible() + && nodeAndClient.isClusterManagerEligible() && autoManageClusterManagerNodes && nodes.values() .stream() - .filter(NodeAndClient::isMasterEligible) + .filter(NodeAndClient::isClusterManagerEligible) .filter(n -> n.nodeAndClientId() < sharedNodesSeeds.length) .count() == 1) { throw new AssertionError("Tried to stop the only cluster-manager eligible shared node"); @@ -1677,12 +1735,12 @@ public synchronized void stopRandomNode(final Predicate filter) throws } /** - * Stops the current cluster-manager node forcefully + * Stops the current cluster-manager node forcefully. */ - public synchronized void stopCurrentMasterNode() throws IOException { + public synchronized void stopCurrentClusterManagerNode() throws IOException { ensureOpen(); assert size() > 0; - String clusterManagerNodeName = getMasterName(); + String clusterManagerNodeName = getClusterManagerName(); final NodeAndClient clusterManagerNode = nodes.get(clusterManagerNodeName); assert clusterManagerNode != null; logger.info("Closing cluster-manager node [{}] ", clusterManagerNodeName); @@ -1692,18 +1750,42 @@ public synchronized void stopCurrentMasterNode() throws IOException { /** * Stops any of the current nodes but not the cluster-manager node. 
*/ - public synchronized void stopRandomNonMasterNode() throws IOException { - NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate()); + public synchronized void stopRandomNonClusterManagerNode() throws IOException { + NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName()).negate()); if (nodeAndClient != null) { - logger.info("Closing random non cluster-manager node [{}] current cluster-manager [{}] ", nodeAndClient.name, getMasterName()); + logger.info( + "Closing random non cluster-manager node [{}] current cluster-manager [{}] ", + nodeAndClient.name, + getClusterManagerName() + ); stopNodesAndClient(nodeAndClient); } } + /** + * Stops the current cluster-manager node forcefully. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopCurrentClusterManagerNode()} + */ + @Deprecated + public synchronized void stopCurrentMasterNode() throws IOException { + stopCurrentClusterManagerNode(); + } + + /** + * Stops any of the current nodes but not the cluster-manager node. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNonClusterManagerNode()} + */ + @Deprecated + public synchronized void stopRandomNonMasterNode() throws IOException { + stopRandomNonClusterManagerNode(); + } + private synchronized void startAndPublishNodesAndClients(List nodeAndClients) { if (nodeAndClients.size() > 0) { final int newClusterManagers = (int) nodeAndClients.stream() - .filter(NodeAndClient::isMasterEligible) + .filter(NodeAndClient::isClusterManagerEligible) .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old cluster-managers .count(); final int currentClusterManagers = getClusterManagerNodesCount(); @@ -1877,8 +1959,8 @@ private Set excludeClusterManagers(Collection nodeAndClie final Set excludedNodeNames = new HashSet<>(); if (autoManageClusterManagerNodes && nodeAndClients.size() > 0) { - final long currentClusterManagers = nodes.values().stream().filter(NodeAndClient::isMasterEligible).count(); - final long stoppingClusterManagers = nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count(); + final long currentClusterManagers = nodes.values().stream().filter(NodeAndClient::isClusterManagerEligible).count(); + final long stoppingClusterManagers = nodeAndClients.stream().filter(NodeAndClient::isClusterManagerEligible).count(); assert stoppingClusterManagers <= currentClusterManagers : currentClusterManagers + " < " + stoppingClusterManagers; if (stoppingClusterManagers != currentClusterManagers && stoppingClusterManagers > 0) { @@ -1887,7 +1969,10 @@ private Set excludeClusterManagers(Collection nodeAndClie // However, we do not yet have a way to be sure there's a majority left, because the voting configuration may not yet have // been updated when the previous nodes shut down, so we must always explicitly withdraw votes. 
// TODO add cluster health API to check that voting configuration is optimal so this isn't always needed - nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).map(NodeAndClient::getName).forEach(excludedNodeNames::add); + nodeAndClients.stream() + .filter(NodeAndClient::isClusterManagerEligible) + .map(NodeAndClient::getName) + .forEach(excludedNodeNames::add); assert excludedNodeNames.size() == stoppingClusterManagers; logger.info("adding voting config exclusions {} prior to restart/shutdown", excludedNodeNames); @@ -1956,15 +2041,15 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception /** * Returns the name of the current cluster-manager node in the cluster. */ - public String getMasterName() { - return getMasterName(null); + public String getClusterManagerName() { + return getClusterManagerName(null); } /** * Returns the name of the current cluster-manager node in the cluster and executes the request via the node specified * in the viaNode parameter. If viaNode isn't specified a random node will be picked to send the request to. */ - public String getMasterName(@Nullable String viaNode) { + public String getClusterManagerName(@Nullable String viaNode) { try { Client client = viaNode != null ? client(viaNode) : client(); return client.admin().cluster().prepareState().get().getState().nodes().getClusterManagerNode().getName(); @@ -1974,6 +2059,27 @@ public String getMasterName(@Nullable String viaNode) { } } + /** + * Returns the name of the current cluster-manager node in the cluster. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerName()} + */ + @Deprecated + public String getMasterName() { + return getClusterManagerName(); + } + + /** + * Returns the name of the current cluster-manager node in the cluster and executes the request via the node specified + * in the viaNode parameter. If viaNode isn't specified a random node will be picked to send the request to. + * + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerName(String)} + */ + @Deprecated + public String getMasterName(@Nullable String viaNode) { + return getClusterManagerName(viaNode); + } + synchronized Set allDataNodesButN(int count) { final int numNodes = numDataNodes() - count; assert size() >= numNodes; @@ -2021,7 +2127,7 @@ private List bootstrapClusterManagerNodeWithSpecifiedIndex(List newSettings = new ArrayList<>(); for (Settings settings : allNodesSettings) { @@ -2034,7 +2140,7 @@ private List bootstrapClusterManagerNodeWithSpecifiedIndex(List nodeNames = new ArrayList<>(); - for (Settings nodeSettings : getDataOrMasterNodeInstances(Settings.class)) { + for (Settings nodeSettings : getDataOrClusterManagerNodeInstances(Settings.class)) { if (DiscoveryNode.isClusterManagerNode(nodeSettings)) { nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings)); } @@ -2157,12 +2263,24 @@ public synchronized List startNodes(Settings... 
extraSettings) { return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList()); } + public List startClusterManagerOnlyNodes(int numNodes) { + return startClusterManagerOnlyNodes(numNodes, Settings.EMPTY); + } + + public List startClusterManagerOnlyNodes(int numNodes, Settings settings) { + return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build()); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNodes(int)} */ + @Deprecated public List startMasterOnlyNodes(int numNodes) { - return startMasterOnlyNodes(numNodes, Settings.EMPTY); + return startClusterManagerOnlyNodes(numNodes); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNodes(int, Settings)} */ + @Deprecated public List startMasterOnlyNodes(int numNodes, Settings settings) { - return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build()); + return startClusterManagerOnlyNodes(numNodes, settings); } public List startDataOnlyNodes(int numNodes) { @@ -2191,6 +2309,18 @@ public String startClusterManagerOnlyNode(Settings settings) { return startNode(settings1); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNode()} */ + @Deprecated + public String startMasterOnlyNode() { + return startClusterManagerOnlyNode(); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNode(Settings)} */ + @Deprecated + public String startMasterOnlyNode(Settings settings) { + return startClusterManagerOnlyNode(settings); + } + public String startDataOnlyNode() { return startDataOnlyNode(Settings.EMPTY); } @@ -2222,8 +2352,14 @@ public int numDataAndMasterNodes() { return filterNodes(nodes, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE)).size(); } + public int numClusterManagerNodes() { + return filterNodes(nodes, NodeAndClient::isClusterManagerEligible).size(); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #numClusterManagerNodes()} */ + @Deprecated public int numMasterNodes() { - return filterNodes(nodes, NodeAndClient::isMasterEligible).size(); + return numClusterManagerNodes(); } public void setDisruptionScheme(ServiceDisruptionScheme scheme) { diff --git a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java index 4f448e230a2b6..958b6c81def34 100644 --- a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java +++ b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java @@ -168,11 +168,11 @@ public static Settings nonIngestNode(final Settings settings) { return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.INGEST_ROLE)); } - public static Settings masterNode() { - return masterNode(Settings.EMPTY); + public static Settings clusterManagerNode() { + return clusterManagerNode(Settings.EMPTY); } - public static Settings masterNode(final Settings settings) { + public static Settings clusterManagerNode(final Settings settings) { return addRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); } @@ -184,12 +184,48 @@ public static Settings clusterManagerOnlyNode(final Settings settings) { return onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } + public static Settings 
nonClusterManagerNode() { + return nonClusterManagerNode(Settings.EMPTY); + } + + public static Settings nonClusterManagerNode(final Settings settings) { + return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNode()} */ + @Deprecated + public static Settings masterNode() { + return clusterManagerNode(); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNode(Settings)} */ + @Deprecated + public static Settings masterNode(final Settings settings) { + return clusterManagerNode(settings); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOnlyNode()} */ + @Deprecated + public static Settings masterOnlyNode() { + return clusterManagerOnlyNode(); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOnlyNode(Settings)} */ + @Deprecated + public static Settings masterOnlyNode(final Settings settings) { + return clusterManagerOnlyNode(settings); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #nonClusterManagerNode()} */ + @Deprecated public static Settings nonMasterNode() { - return nonMasterNode(Settings.EMPTY); + return nonClusterManagerNode(); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #nonClusterManagerNode(Settings)} */ + @Deprecated public static Settings nonMasterNode(final Settings settings) { - return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); + return nonClusterManagerNode(settings); } public static Settings remoteClusterClientNode() { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 6344fe3435675..0f868efdd5c45 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -713,7 +713,7 @@ public void setDisruptionScheme(ServiceDisruptionScheme scheme) { * @return disruption */ protected static NetworkDisruption isolateClusterManagerDisruption(NetworkDisruption.NetworkLinkDisruptionType disruptionType) { - final String clusterManagerNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getClusterManagerName(); return new NetworkDisruption( new NetworkDisruption.TwoPartitions( Collections.singleton(clusterManagerNode), diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java index 85f8e5c250066..19aaba154109d 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java @@ -53,7 +53,7 @@ public BlockMasterServiceOnMaster(Random random) { @Override public void startDisrupting() { - disruptedNode = cluster.getMasterName(); + disruptedNode = cluster.getClusterManagerName(); final String disruptionNodeCopy = disruptedNode; if (disruptionNodeCopy == null) { return; diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java 
b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java index 4830f9b0359fb..d764c23404a91 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java @@ -52,7 +52,7 @@ public BusyMasterServiceDisruption(Random random, Priority priority) { @Override public void startDisrupting() { - disruptedNode = cluster.getMasterName(); + disruptedNode = cluster.getClusterManagerName(); final String disruptionNodeCopy = disruptedNode; if (disruptionNodeCopy == null) { return; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java index 6a87a89db4959..56ccb91dc3331 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java @@ -105,6 +105,12 @@ public Version getClusterManagerVersion() { return clusterManagerVersion; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerVersion()} */ + @Deprecated + public Version getMasterVersion() { + return getClusterManagerVersion(); + } + /** * Calls an api with the provided parameters and body */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 8e6bbd009dc2f..78818aefe44cc 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -227,8 +227,14 @@ public Version esVersion() { return clientYamlTestClient.getEsVersion(); } - public Version masterVersion() { + public Version clusterManagerVersion() { return clientYamlTestClient.getClusterManagerVersion(); } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerVersion()} */ + @Deprecated + public Version masterVersion() { + return clusterManagerVersion(); + } + } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index 6fdc77984c339..f71c67ce456bc 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -321,7 +321,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx } fail(formatStatusCodeMessage(response, catchStatusCode)); } - checkWarningHeaders(response.getWarningHeaders(), executionContext.masterVersion()); + checkWarningHeaders(response.getWarningHeaders(), executionContext.clusterManagerVersion()); } catch (ClientYamlTestResponseException e) { ClientYamlTestResponse restTestResponse = e.getRestTestResponse(); if (!Strings.hasLength(catchParam)) { diff --git a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java index a0b392f3fa669..b802b48960f61 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java +++ 
b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java @@ -304,13 +304,13 @@ public Path nodeConfigPath(int nodeOrdinal) { ); try { cluster.beforeTest(random()); - final int originalClusterManagerCount = cluster.numMasterNodes(); + final int originalClusterManagerCount = cluster.numClusterManagerNodes(); final Map shardNodePaths = new HashMap<>(); for (String name : cluster.getNodeNames()) { shardNodePaths.put(name, getNodePaths(cluster, name)); } String poorNode = randomValueOtherThanMany( - n -> originalClusterManagerCount == 1 && n.equals(cluster.getMasterName()), + n -> originalClusterManagerCount == 1 && n.equals(cluster.getClusterManagerName()), () -> randomFrom(cluster.getNodeNames()) ); Path dataPath = getNodePaths(cluster, poorNode)[0]; From 1c787e8e28e04ca7f07ffd47b91fb6ff088d9648 Mon Sep 17 00:00:00 2001 From: piyush Date: Fri, 22 Jul 2022 13:04:55 -0700 Subject: [PATCH 033/104] Parallelize stale blobs deletion during snapshot delete (#3796) * Parallelize stale blobs deletion during snapshot delete Signed-off-by: Piyush Daftary * Adding test which throws exception Signed-off-by: Piyush Daftary * Adjusting indentation for spotlessJavaCheck Signed-off-by: Piyush Daftary * Adding more description to MAX_SHARD_BLOB_DELETE_BATCH_SIZE Signed-off-by: Piyush Daftary * Renaming max_shard_blob_delete_batch_size to max_snapshot_shard_blob_delete_batch_size Signed-off-by: Piyush Daftary --- .../opensearch/snapshots/RepositoriesIT.java | 124 ++++++++++++++++++ .../blobstore/BlobStoreRepository.java | 73 +++++++++-- .../snapshots/mockstore/MockRepository.java | 3 + 3 files changed, 192 insertions(+), 8 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index 197a8a10f3a20..9aaafa64ce60c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -35,6 +35,8 @@ import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.Metadata; @@ -46,6 +48,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.RepositoryVerificationException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; @@ -328,4 +331,125 @@ public void testRepositoryVerification() throws Exception { assertThat(ex.getMessage(), containsString("is not shared")); } } + + public void testSnapshotShardBlobDelete() throws Exception { + Client client = client(); + Path repositoryPath = randomRepoPath(); + final String repositoryName = "test-repo"; + final String firstSnapshot = "first-snapshot"; + final String secondSnapshot = "second-snapshot"; + final String indexName = "test-idx"; + + logger.info("--> creating repository at {}", repositoryPath.toAbsolutePath()); + int 
maxShardBlobDeleteBatchSize = randomIntBetween(1, 1000); + createRepository( + "test-repo", + "mock", + Settings.builder() + .put("location", repositoryPath) + .put(BlobStoreRepository.MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.getKey(), maxShardBlobDeleteBatchSize) + ); + + logger.info("--> creating index-0 and ingest data"); + createIndex(indexName); + ensureGreen(); + for (int j = 0; j < randomIntBetween(1, 1000); j++) { + index(indexName, "_doc", Integer.toString(j), "foo", "bar" + j); + } + refresh(); + + logger.info("--> creating first snapshot"); + createFullSnapshot(repositoryName, firstSnapshot); + + int numberOfFiles = numberOfFiles(repositoryPath); + + logger.info("--> adding some more documents to test index"); + for (int j = 0; j < randomIntBetween(100, 10000); ++j) { + final BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < randomIntBetween(100, 1000); ++i) { + bulkRequest.add(new IndexRequest(indexName).source("foo" + j, "bar" + i)); + } + client().bulk(bulkRequest).get(); + } + refresh(); + + logger.info("--> creating second snapshot"); + createFullSnapshot(repositoryName, secondSnapshot); + + // Delete second snapshot + logger.info("--> delete second snapshot"); + client.admin().cluster().prepareDeleteSnapshot(repositoryName, secondSnapshot).get(); + + logger.info("--> make sure that number of files is back to what it was when the first snapshot was made"); + assertFileCount(repositoryPath, numberOfFiles); + + logger.info("--> done"); + } + + public void testSnapshotShardBlobDeletionRepositoryThrowingError() throws Exception { + Client client = client(); + Path repositoryPath = randomRepoPath(); + final String repositoryName = "test-repo"; + final String firstSnapshot = "first-snapshot"; + final String secondSnapshot = "second-snapshot"; + final String indexName = "test-idx"; + + logger.info("--> creating repository at {}", repositoryPath.toAbsolutePath()); + int maxShardBlobDeleteBatchSize = randomIntBetween(1, 1000); + createRepository( + "test-repo", + "mock", + Settings.builder() + .put("location", repositoryPath) + .put(BlobStoreRepository.MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.getKey(), maxShardBlobDeleteBatchSize) + ); + + logger.info("--> creating index-0 and ingest data"); + createIndex(indexName); + ensureGreen(); + for (int j = 0; j < randomIntBetween(1, 1000); j++) { + index(indexName, "_doc", Integer.toString(j), "foo", "bar" + j); + } + refresh(); + + logger.info("--> creating first snapshot"); + createFullSnapshot(repositoryName, firstSnapshot); + + logger.info("--> adding some more documents to test index"); + for (int j = 0; j < randomIntBetween(100, 1000); ++j) { + final BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < randomIntBetween(100, 1000); ++i) { + bulkRequest.add(new IndexRequest(indexName).source("foo" + j, "bar" + i)); + } + client().bulk(bulkRequest).get(); + } + refresh(); + + logger.info("--> creating second snapshot"); + createFullSnapshot(repositoryName, secondSnapshot); + + // Make repository to throw exception when trying to delete stale snapshot shard blobs + String clusterManagerNode = internalCluster().getMasterName(); + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerNode).repository("test-repo")) + .setThrowExceptionWhileDelete(true); + + // Delete second snapshot + logger.info("--> delete second snapshot"); + client.admin().cluster().prepareDeleteSnapshot(repositoryName, secondSnapshot).get(); + + // Make repository to work normally + ((MockRepository) 
internalCluster().getInstance(RepositoriesService.class, clusterManagerNode).repository("test-repo")) + .setThrowExceptionWhileDelete(false); + + // This snapshot should delete last snapshot's residual stale shard blobs as well + logger.info("--> delete first snapshot"); + client.admin().cluster().prepareDeleteSnapshot(repositoryName, firstSnapshot).get(); + + // Expect two files to remain in the repository: + // (1) index-(N+1) + // (2) index-latest + assertFileCount(repositoryPath, 2); + + logger.info("--> done"); + } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 6eaec491c8177..c36d92abcf498 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -147,6 +147,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -235,6 +236,17 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.NodeScope ); + /** + * Setting to set batch size of stale snapshot shard blobs that will be deleted by snapshot workers as part of snapshot deletion. + * For optimal performance the value of the setting should be equal to or close to the repository's maximum number of keys that can be deleted in a single operation. + * Most cloud storage services support up to 1000 key deletions in a single operation, hence the default value of 1000. + */ + public static final Setting MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE = Setting.intSetting( + "max_snapshot_shard_blob_delete_batch_size", + 1000, // the default maximum batch size of stale snapshot shard blobs deletion + Setting.Property.NodeScope + ); + + /** + * Setting to disable writing the {@code index.latest} blob which enables the contents of this repository to be used with a + * url-repository. 
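The new batch-size setting is repository-scoped, so it rides along with the other repository settings at registration time. A rough usage sketch, not part of this patch (the repository name, type, and location below are assumptions; only the setting key comes from the definition above, and `client` is assumed to be an org.opensearch.client.Client handle):

    // Hypothetical sketch: register an "fs" repository with a smaller
    // delete batch size than the default of 1000.
    client.admin()
        .cluster()
        .preparePutRepository("my-backups")
        .setType("fs")
        .setSettings(
            Settings.builder()
                .put("location", "/mnt/backups")
                .put("max_snapshot_shard_blob_delete_batch_size", 500)
        )
        .get();

The RepositoriesIT tests above exercise the same key through the createRepository helper with the mock repository type.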
@@ -243,6 +255,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final boolean supportURLRepo; + private final int maxShardBlobDeleteBatch; + private final boolean compress; private final boolean cacheRepositoryData; @@ -358,6 +372,7 @@ protected BlobStoreRepository( readOnly = metadata.settings().getAsBoolean("readonly", false); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); + maxShardBlobDeleteBatch = MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.get(metadata.settings()); } @Override @@ -902,15 +917,57 @@ private void asyncCleanupUnlinkedShardLevelBlobs( listener.onResponse(null); return; } - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { - try { - deleteFromContainer(blobContainer(), filesToDelete); - l.onResponse(null); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("{} Failed to delete some blobs during snapshot delete", snapshotIds), e); - throw e; + + try { + AtomicInteger counter = new AtomicInteger(); + Collection> subList = filesToDelete.stream() + .collect(Collectors.groupingBy(it -> counter.getAndIncrement() / maxShardBlobDeleteBatch)) + .values(); + final BlockingQueue> staleFilesToDeleteInBatch = new LinkedBlockingQueue<>(subList); + + final GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.wrap(r -> { listener.onResponse(null); }, listener::onFailure), + staleFilesToDeleteInBatch.size() + ); + + // Start as many workers as fit into the snapshot pool at once at the most + final int workers = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(), staleFilesToDeleteInBatch.size()); + for (int i = 0; i < workers; ++i) { + executeStaleShardDelete(staleFilesToDeleteInBatch, groupedListener); } - })); + + } catch (Exception e) { + // TODO: We shouldn't be blanket catching and suppressing all exceptions here and instead handle them safely upstream. + // Currently this catch exists as a stop gap solution to tackle unexpected runtime exceptions from implementations + // bubbling up and breaking the snapshot functionality. + assert false : e; + logger.warn(new ParameterizedMessage("[{}] Exception during cleanup of stale shard blobs", snapshotIds), e); + listener.onFailure(e); + } + } + + private void executeStaleShardDelete(BlockingQueue> staleFilesToDeleteInBatch, GroupedActionListener listener) + throws InterruptedException { + List filesToDelete = staleFilesToDeleteInBatch.poll(0L, TimeUnit.MILLISECONDS); + if (filesToDelete != null) { + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { + try { + deleteFromContainer(blobContainer(), filesToDelete); + l.onResponse(null); + } catch (Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "[{}] Failed to delete following blobs during snapshot delete : {}", + metadata.name(), + filesToDelete + ), + e + ); + l.onFailure(e); + } + executeStaleShardDelete(staleFilesToDeleteInBatch, listener); + })); + } } // updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up. 
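The deletion change above pairs a batching step (grouping the flat list of stale blob names into chunks of at most maxShardBlobDeleteBatch entries) with a bounded set of workers that drain those chunks from a queue, each worker re-submitting itself until the queue is empty. Below is a self-contained sketch of the same pattern with the OpenSearch listener plumbing stripped out; the pool size, batch size, and the deleteBatch body are placeholder assumptions, not code from this patch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.LinkedBlockingQueue;

    public class BatchedDeleteSketch {
        static final int BATCH_SIZE = 1000; // mirrors the setting's default

        public static void main(String[] args) throws Exception {
            List<String> filesToDelete = new ArrayList<>();
            for (int i = 0; i < 2500; i++) {
                filesToDelete.add("blob-" + i);
            }
            // Partition the flat list into batches of at most BATCH_SIZE names.
            BlockingQueue<List<String>> batches = new LinkedBlockingQueue<>();
            for (int i = 0; i < filesToDelete.size(); i += BATCH_SIZE) {
                batches.add(filesToDelete.subList(i, Math.min(i + BATCH_SIZE, filesToDelete.size())));
            }
            // Start as many workers as fit into the pool, at most one per batch.
            ExecutorService pool = Executors.newFixedThreadPool(4);
            int workers = Math.min(4, batches.size());
            CountDownLatch done = new CountDownLatch(batches.size());
            for (int i = 0; i < workers; i++) {
                pool.execute(() -> {
                    List<String> batch;
                    // Each worker keeps pulling batches until the queue is drained,
                    // the loop equivalent of executeStaleShardDelete re-submitting itself.
                    while ((batch = batches.poll()) != null) {
                        deleteBatch(batch);
                        done.countDown();
                    }
                });
            }
            done.await();
            pool.shutdown();
        }

        // Placeholder for the bulk delete call against the blob container.
        static void deleteBatch(List<String> batch) {
            System.out.println("deleting " + batch.size() + " blobs");
        }
    }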
diff --git a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java index 4e641f9505e3e..fc8ff3ce841e7 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java @@ -474,6 +474,9 @@ public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOExce if (blockOnDeleteIndexN && blobNames.stream().anyMatch(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))) { blockExecutionAndMaybeWait("index-{N}"); } + if (setThrowExceptionWhileDelete) { + throw new IOException("Random exception"); + } super.deleteBlobsIgnoringIfNotExists(blobNames); } From efde8c55abc790b57f0d755960d9be0219eee8e4 Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Sat, 23 Jul 2022 14:07:40 -0700 Subject: [PATCH 034/104] Add guidelines for code contributions (#3976) A set of guidelines to decide whether a feature should be included in OpenSearch. Signed-off-by: Rabi Panda --- CONTRIBUTING.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9a68cde006a6f..467c7716cc578 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -59,6 +59,16 @@ If you would like to contribute to the documentation, please do so in the [docum As with other types of contributions, the first step is to [**open an issue on GitHub**](https://github.com/opensearch-project/OpenSearch/issues/new/choose). Opening an issue before you make changes makes sure that someone else isn't already working on that particular problem. It also lets us all work together to find the right approach before you spend a bunch of time on a PR. So again, when in doubt, open an issue. +Additionally, here are a few guidelines to help you decide whether a particular feature should be included in OpenSearch. + +**Is your feature important to most users of OpenSearch?** + +If you believe that a feature is going to fulfill a need for most users of OpenSearch, then it belongs in OpenSearch. However, we don't want every feature built into the core server. If the feature requires additional permissions or brings in extra dependencies it should instead be included as a module in core. + +**Is your feature a common dependency across multiple plugins?** + +Does this feature contain functionality that cuts across multiple plugins? If so, this most likely belongs in OpenSearch as a core module or plugin. + Once you've opened an issue, check out our [Developer Guide](./DEVELOPER_GUIDE.md) for instructions on how to get started. ## Developer Certificate of Origin From b24b02fe3b626d30bc4b833c80f4282fc4b9e6f0 Mon Sep 17 00:00:00 2001 From: Cole White <42356806+shdubsh@users.noreply.github.com> Date: Tue, 26 Jul 2022 00:51:04 +0000 Subject: [PATCH 035/104] Use bash in systemd-entrypoint shebang (#4008) Uses a shell environment where `-o pipefail` is available. 
Closes #4005 Signed-off-by: Cole White --- distribution/packages/src/common/systemd/systemd-entrypoint | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distribution/packages/src/common/systemd/systemd-entrypoint b/distribution/packages/src/common/systemd/systemd-entrypoint index d9eacdb7812af..de59b4573f79a 100644 --- a/distribution/packages/src/common/systemd/systemd-entrypoint +++ b/distribution/packages/src/common/systemd/systemd-entrypoint @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash # This wrapper script allows SystemD to feed a file containing a passphrase into # the main OpenSearch startup script From a34a4c04250b5c4c210ce79d14611953f5267963 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 26 Jul 2022 09:58:47 -0400 Subject: [PATCH 036/104] Update merge on refresh and merge on commit defaults in Opensearch (Lucene 9.3) (#3561) * Update merge on refresh and merge on commit defaults in Opensearch (Lucene 9.3) Signed-off-by: Andriy Redko * Addressing code review comments, adding more tests Signed-off-by: Andriy Redko --- .../decider/DiskThresholdDeciderIT.java | 1 - .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexSettings.java | 50 +++++++++++++++++-- .../index/engine/InternalEngine.java | 16 +++--- .../opensearch/common/lucene/LuceneTests.java | 6 +-- .../index/engine/InternalEngineTests.java | 45 +++++++++++++++-- .../index/shard/ShardGetServiceTests.java | 1 - 7 files changed, 98 insertions(+), 22 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 45daea3d066d3..fcfcec3445a80 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -153,7 +153,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(InternalSettingsPlugin.class); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/pull/3561") public void testHighWatermarkNotExceeded() throws Exception { internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index e6a82d36a18a9..a2aac1f2c54c5 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -194,6 +194,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING, IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, + IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index eb8ba98e61890..7c9b9755a2434 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -32,11 +32,12 @@ package org.opensearch.index; import 
org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.util.Strings; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Strings; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -53,9 +54,11 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.UnaryOperator; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; @@ -73,6 +76,9 @@ * @opensearch.internal */ public final class IndexSettings { + private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default"; + private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush"; + public static final Setting> DEFAULT_FIELD_SETTING = Setting.listSetting( "index.query.default_field", Collections.singletonList("*"), @@ -526,14 +532,21 @@ public final class IndexSettings { public static final Setting INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME = Setting.timeSetting( "index.merge_on_flush.max_full_flush_merge_wait_time", new TimeValue(10, TimeUnit.SECONDS), - new TimeValue(0, TimeUnit.MILLISECONDS), + new TimeValue(1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope ); public static final Setting INDEX_MERGE_ON_FLUSH_ENABLED = Setting.boolSetting( "index.merge_on_flush.enabled", - false, + true, /* https://issues.apache.org/jira/browse/LUCENE-10078 */ + Property.IndexScope, + Property.Dynamic + ); + + public static final Setting INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( + "index.merge_on_flush.policy", + MERGE_ON_FLUSH_DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); @@ -632,6 +645,10 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { * Is merge of flush enabled or not */ private volatile boolean mergeOnFlushEnabled; + /** + * Specialized merge-on-flush policy if provided + */ + private volatile UnaryOperator mergeOnFlushPolicy; /** * Returns the default search fields for this index. 
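With merge-on-flush now on by default, these settings matter mainly when tuning the wait time or switching the policy. A hedged sketch of creating an index that opts into the explicit merge-on-flush policy (the index name is an assumption and `client` is assumed to be an org.opensearch.client.Client handle; the keys and accepted values follow the settings defined above):

    // Hypothetical sketch: enable the Lucene MergeOnFlushMergePolicy wrapper
    // with a 5 second full-flush merge wait; pass "default" instead to keep
    // the stock merge policy behavior.
    client.admin()
        .indices()
        .prepareCreate("my-index")
        .setSettings(
            Settings.builder()
                .put("index.merge_on_flush.enabled", true) // the new default
                .put("index.merge_on_flush.max_full_flush_merge_wait_time", "5s")
                .put("index.merge_on_flush.policy", "merge-on-flush")
        )
        .get();

Any policy value other than "default" or "merge-on-flush" trips the IllegalArgumentException in setMergeOnFlushPolicy shown below.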
@@ -750,6 +767,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mappingFieldNameLengthLimit = scopedSettings.get(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING); maxFullFlushMergeWaitTime = scopedSettings.get(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME); mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED); + setMergeOnFlushPolicy(scopedSettings.get(INDEX_MERGE_ON_FLUSH_POLICY)); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( @@ -822,6 +840,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, this::setMappingFieldNameLengthLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, this::setMaxFullFlushMergeWaitTime); scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_ENABLED, this::setMergeOnFlushEnabled); + scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_POLICY, this::setMergeOnFlushPolicy); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { @@ -892,7 +911,7 @@ public String getUUID() { * Returns true if the index has a custom data path */ public boolean hasCustomDataPath() { - return Strings.isNotEmpty(customDataPath()); + return !Strings.isEmpty(customDataPath()); } /** @@ -1426,4 +1445,27 @@ public TimeValue getMaxFullFlushMergeWaitTime() { public boolean isMergeOnFlushEnabled() { return mergeOnFlushEnabled; } + + private void setMergeOnFlushPolicy(String policy) { + if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + mergeOnFlushPolicy = null; + } else if (MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { + this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; + } else { + throw new IllegalArgumentException( + "The " + + IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY.getKey() + + " has unsupported policy specified: " + + policy + + ". 
Please use one of: " + + MERGE_ON_FLUSH_DEFAULT_POLICY + + ", " + + MERGE_ON_FLUSH_MERGE_POLICY + ); + } + } + + public Optional> getMergeOnFlushPolicy() { + return Optional.ofNullable(mergeOnFlushPolicy); + } } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 69a168eb04ac7..7d90c2ad653be 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -50,7 +50,6 @@ import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.index.Term; -import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSetIterator; @@ -133,6 +132,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiConsumer; import java.util.function.BiFunction; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -2295,14 +2295,14 @@ private IndexWriterConfig getIndexWriterConfig() { final long maxFullFlushMergeWaitMillis = config().getIndexSettings().getMaxFullFlushMergeWaitTime().millis(); if (maxFullFlushMergeWaitMillis > 0) { iwc.setMaxFullFlushMergeWaitMillis(maxFullFlushMergeWaitMillis); - mergePolicy = new MergeOnFlushMergePolicy(mergePolicy); - } else { - logger.warn( - "The {} is enabled but {} is set to 0, merge on flush will not be activated", - IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), - IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey() - ); + final Optional> mergeOnFlushPolicy = config().getIndexSettings().getMergeOnFlushPolicy(); + if (mergeOnFlushPolicy.isPresent()) { + mergePolicy = mergeOnFlushPolicy.get().apply(mergePolicy); + } } + } else { + // Disable merge on refresh + iwc.setMaxFullFlushMergeWaitMillis(0); } iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy)); diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java index 776b44d346fb5..0edcd55cc35c3 100644 --- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java @@ -590,10 +590,8 @@ public void testWrapAllDocsLive() throws Exception { public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { Directory dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) - .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); - // override 500ms default introduced in - // https://issues.apache.org/jira/browse/LUCENE-10078 - config.setMaxFullFlushMergeWaitMillis(0); + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())) + .setMaxFullFlushMergeWaitMillis(0); IndexWriter writer = new IndexWriter(dir, config); int numDocs = between(1, 10); List liveDocs = new ArrayList<>(); diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 918fc91e7076f..28a420403bcc1 100644 --- 
a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -503,8 +503,7 @@ public void testMergeSegmentsOnCommitIsDisabled() throws Exception { final Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) - .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(0)) - .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true); + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -576,7 +575,7 @@ public void testMergeSegmentsOnCommit() throws Exception { final Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(5000)) - .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true); + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY.getKey(), "merge-on-flush"); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); @@ -638,6 +637,44 @@ public void testMergeSegmentsOnCommit() throws Exception { } } + public void testMergeSegmentsOnCommitDefault() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Settings.Builder settings = Settings.builder().put(defaultSettings.getSettings()); + final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); + + final TieredMergePolicy mergePolicy = new TieredMergePolicy(); + mergePolicy.setSegmentsPerTier(2); + + try ( + Store store = createStore(); + InternalEngine engine = createEngine( + config(indexSettings, store, createTempDir(), mergePolicy, null, null, globalCheckpoint::get) + ) + ) { + List segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + + segments = engine.segments(true); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + ParsedDocument doc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_3, null); + engine.index(indexForDoc(doc3)); + engine.refresh("test"); + + segments = engine.segments(true); + assertThat(segments.size(), equalTo(2)); + } + } + // this test writes documents to the engine while concurrently flushing/commit public void testConcurrentMergeSegmentsOnCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -645,7 +682,7 @@ public void testConcurrentMergeSegmentsOnCommit() throws Exception { final Settings.Builder settings = Settings.builder() .put(defaultSettings.getSettings()) .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(5000)) - 
.put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true); + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY.getKey(), "merge-on-flush"); final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java index 5dd053574268e..f29303ce8dda1 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java @@ -56,7 +56,6 @@ public void testGetForUpdate() throws IOException { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .build(); IndexMetadata metadata = IndexMetadata.builder("test") .putMapping("{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") From f4b0eefe1ca058aede90692defb09fbe071926ab Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Tue, 26 Jul 2022 21:03:57 +0530 Subject: [PATCH 037/104] Implementing Delete PIT service layer changes (#3949) * Delete pit service layer changes Signed-off-by: Bharathwaj G --- .../org/opensearch/action/ActionModule.java | 3 + .../action/search/CreatePitController.java | 47 +- .../action/search/DeletePitAction.java | 24 + .../action/search/DeletePitInfo.java | 83 +++ .../action/search/DeletePitRequest.java | 121 ++++ .../action/search/DeletePitResponse.java | 101 +++ .../opensearch/action/search/PitService.java | 133 ++++ .../action/search/SearchContextIdForNode.java | 4 +- .../action/search/SearchTransportService.java | 82 +++ .../search/TransportDeletePitAction.java | 101 +++ .../java/org/opensearch/client/Client.java | 7 + .../client/support/AbstractClient.java | 8 + .../org/opensearch/search/SearchService.java | 55 ++ .../search/CreatePitControllerTests.java | 82 ++- .../search/TransportDeletePitActionTests.java | 638 ++++++++++++++++++ .../search/CreatePitMultiNodeTests.java | 113 ++++ .../search/DeletePitMultiNodeTests.java | 333 +++++++++ .../search/DeletePitResponseTests.java | 67 ++ .../opensearch/search/SearchServiceTests.java | 39 ++ 19 files changed, 2033 insertions(+), 8 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitInfo.java create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/PitService.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java create mode 100644 server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java create mode 100644 server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java create mode 100644 server/src/test/java/org/opensearch/search/DeletePitResponseTests.java diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index cf79c97e4ca64..71c900beb5319 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ 
b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -233,11 +233,13 @@ import org.opensearch.action.main.TransportMainAction; import org.opensearch.action.search.ClearScrollAction; import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; +import org.opensearch.action.search.TransportDeletePitAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -661,6 +663,7 @@ public void reg // point in time actions actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); + actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); return unmodifiableMap(actions.getRegistry()); } diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 40ee810186eb2..f64dd3d7efae6 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -29,9 +29,12 @@ import org.opensearch.tasks.Task; import org.opensearch.transport.Transport; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.BiFunction; @@ -51,6 +54,7 @@ public class CreatePitController { private final ClusterService clusterService; private final TransportSearchAction transportSearchAction; private final NamedWriteableRegistry namedWriteableRegistry; + private final PitService pitService; private static final Logger logger = LogManager.getLogger(CreatePitController.class); public static final Setting PIT_INIT_KEEP_ALIVE = Setting.positiveTimeSetting( "point_in_time.init.keep_alive", @@ -63,12 +67,14 @@ public CreatePitController( SearchTransportService searchTransportService, ClusterService clusterService, TransportSearchAction transportSearchAction, - NamedWriteableRegistry namedWriteableRegistry + NamedWriteableRegistry namedWriteableRegistry, + PitService pitService ) { this.searchTransportService = searchTransportService; this.clusterService = clusterService; this.transportSearchAction = transportSearchAction; this.namedWriteableRegistry = namedWriteableRegistry; + this.pitService = pitService; } /** @@ -248,8 +254,47 @@ public void onResponse(final Collection responses) { @Override public void onFailure(final Exception e) { + cleanupContexts(contexts, createPITResponse.getId()); updatePitIdListener.onFailure(e); } }, size); } + + /** + * Cleanup all created PIT contexts in case of failure + */ + private void cleanupContexts(Collection contexts, String pitId) { + ActionListener deleteListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + // this is invoke and forget call + final StringBuilder failedPitsStringBuilder = new StringBuilder(); + response.getDeletePitResults() + .stream() + .filter(r -> !r.isSuccessful()) + .forEach(r -> failedPitsStringBuilder.append(r.getPitId()).append(",")); 
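+ // Best-effort cleanup: failed deletions are only logged here; the original create-PIT failure is still propagated to the caller.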
+ logger.warn(() -> new ParameterizedMessage("Failed to delete PIT IDs {}", failedPitsStringBuilder.toString())); + if (logger.isDebugEnabled()) { + final StringBuilder successfulPitsStringBuilder = new StringBuilder(); + response.getDeletePitResults() + .stream() + .filter(r -> r.isSuccessful()) + .forEach(r -> successfulPitsStringBuilder.append(r.getPitId()).append(",")); + logger.debug(() -> new ParameterizedMessage("Deleted PIT with IDs {}", successfulPitsStringBuilder.toString())); + } + } + + @Override + public void onFailure(Exception e) { + logger.error("Cleaning up PIT contexts failed ", e); + } + }; + Map<DiscoveryNode, List<PitSearchContextIdForNode>> nodeToContextsMap = new HashMap<>(); + for (SearchContextIdForNode context : contexts) { + List<PitSearchContextIdForNode> contextIdsForNode = nodeToContextsMap.getOrDefault(context.getNode(), new ArrayList<>()); + contextIdsForNode.add(new PitSearchContextIdForNode(pitId, context)); + nodeToContextsMap.put(context.getNode(), contextIdsForNode); + } + pitService.deletePitContexts(nodeToContextsMap, deleteListener); + } } diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitAction.java b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java new file mode 100644 index 0000000000000..aa305ecfe73ab --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for deleting point in time searches + */ +public class DeletePitAction extends ActionType<DeletePitResponse> { + + public static final DeletePitAction INSTANCE = new DeletePitAction(); + public static final String NAME = "indices:data/read/point_in_time/delete"; + + private DeletePitAction() { + super(NAME, DeletePitResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java new file mode 100644 index 0000000000000..943199812771a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * This class captures whether deletion of a pit was successful, along with the pit id + */ +public class DeletePitInfo extends TransportResponse implements Writeable, ToXContent { + /** + * This will be true if PIT reader contexts are deleted and also if contexts are not found.
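+ * Missing contexts are reported as successfully deleted, which makes delete pit idempotent (see SearchService#freeReaderContextsIfFound).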
+ */ + private final boolean successful; + + private final String pitId; + + public DeletePitInfo(boolean successful, String pitId) { + this.successful = successful; + this.pitId = pitId; + } + + public DeletePitInfo(StreamInput in) throws IOException { + successful = in.readBoolean(); + pitId = in.readString(); + + } + + public boolean isSuccessful() { + return successful; + } + + public String getPitId() { + return pitId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(successful); + out.writeString(pitId); + } + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit_info", + true, + args -> new DeletePitInfo((boolean) args[0], (String) args[1]) + ); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("successful")); + PARSER.declareString(constructorArg(), new ParseField("pitId")); + } + + private static final ParseField SUCCESSFUL = new ParseField("successful"); + private static final ParseField PIT_ID = new ParseField("pitId"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SUCCESSFUL.getPreferredName(), successful); + builder.field(PIT_ID.getPreferredName(), pitId); + builder.endObject(); + return builder; + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java new file mode 100644 index 0000000000000..945fcfd17eb6c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -0,0 +1,121 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Request to delete one or more PIT search contexts based on IDs. + */ +public class DeletePitRequest extends ActionRequest implements ToXContentObject { + + /** + * List of PIT IDs to be deleted , and use "_all" to delete all PIT reader contexts + */ + private final List pitIds = new ArrayList<>(); + + public DeletePitRequest(StreamInput in) throws IOException { + super(in); + pitIds.addAll(Arrays.asList(in.readStringArray())); + } + + public DeletePitRequest(String... 
pitIds) { + this.pitIds.addAll(Arrays.asList(pitIds)); + } + + public DeletePitRequest(List pitIds) { + this.pitIds.addAll(pitIds); + } + + public DeletePitRequest() {} + + public List getPitIds() { + return pitIds; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pitIds == null || pitIds.isEmpty()) { + validationException = addValidationError("no pit ids specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (pitIds == null) { + out.writeVInt(0); + } else { + out.writeStringArray(pitIds.toArray(new String[pitIds.size()])); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("pit_id"); + for (String pitId : pitIds) { + builder.value(pitId); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + public void fromXContent(XContentParser parser) throws IOException { + pitIds.clear(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Malformed content, must start with an object"); + } else { + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("pit_id".equals(currentFieldName)) { + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id array element should only contain pit_id"); + } + pitIds.add(parser.text()); + } + } else { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id element should only contain pit_id"); + } + pitIds.add(parser.text()); + } + } else { + throw new IllegalArgumentException( + "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " + ); + } + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java new file mode 100644 index 0000000000000..cdbeb3dc2b749 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.rest.RestStatus.NOT_FOUND; +import static org.opensearch.rest.RestStatus.OK; + +/** + * Response class for delete pits flow which clears the point in time search contexts + */ +public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { + + private final List deletePitResults; + + public DeletePitResponse(List deletePitResults) { + this.deletePitResults = deletePitResults; + } + + public DeletePitResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + deletePitResults = new ArrayList<>(); + for (int i = 0; i < size; i++) { + deletePitResults.add(new DeletePitInfo(in)); + } + + } + + public List getDeletePitResults() { + return deletePitResults; + } + + /** + * @return Whether the attempt to delete PIT was successful. + */ + @Override + public RestStatus status() { + if (deletePitResults.isEmpty()) return NOT_FOUND; + return OK; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletePitResults.size()); + for (DeletePitInfo deletePitResult : deletePitResults) { + deletePitResult.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("pits"); + for (DeletePitInfo response : deletePitResults) { + response.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit_response", + true, + (Object[] parsedObjects) -> { + @SuppressWarnings("unchecked") + List deletePitInfoList = (List) parsedObjects[0]; + return new DeletePitResponse(deletePitInfoList); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), DeletePitInfo.PARSER, new ParseField("pits")); + } + + public static DeletePitResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java new file mode 100644 index 0000000000000..4c5f1a1c0fc4f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.inject.Inject; +import org.opensearch.transport.Transport; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +/** + * Service class for PIT reusable functions + */ +public class PitService { + + private static final Logger logger = LogManager.getLogger(PitService.class); + + private final ClusterService clusterService; + private final SearchTransportService searchTransportService; + + @Inject + public PitService(ClusterService clusterService, SearchTransportService searchTransportService) { + this.clusterService = clusterService; + this.searchTransportService = searchTransportService; + } + + /** + * Delete list of pit contexts. Returns the details of success of operation per PIT ID. + */ + public void deletePitContexts( + Map> nodeToContextsMap, + ActionListener listener + ) { + final Set clusters = nodeToContextsMap.values() + .stream() + .flatMap(Collection::stream) + .filter(ctx -> Strings.isEmpty(ctx.getSearchContextIdForNode().getClusterAlias()) == false) + .map(c -> c.getSearchContextIdForNode().getClusterAlias()) + .collect(Collectors.toSet()); + StepListener> lookupListener = (StepListener< + BiFunction>) SearchUtils.getConnectionLookupListener( + searchTransportService.getRemoteClusterService(), + clusterService.state(), + clusters + ); + lookupListener.whenComplete(nodeLookup -> { + final GroupedActionListener groupedListener = getDeletePitGroupedListener( + listener, + nodeToContextsMap.size() + ); + + for (Map.Entry> entry : nodeToContextsMap.entrySet()) { + String clusterAlias = entry.getValue().get(0).getSearchContextIdForNode().getClusterAlias(); + final DiscoveryNode node = nodeLookup.apply(clusterAlias, entry.getValue().get(0).getSearchContextIdForNode().getNode()); + if (node == null) { + logger.error( + () -> new ParameterizedMessage("node [{}] not found", entry.getValue().get(0).getSearchContextIdForNode().getNode()) + ); + List deletePitInfos = new ArrayList<>(); + for (PitSearchContextIdForNode pitSearchContextIdForNode : entry.getValue()) { + deletePitInfos.add(new DeletePitInfo(false, pitSearchContextIdForNode.getPitId())); + } + groupedListener.onResponse(new DeletePitResponse(deletePitInfos)); + } else { + try { + final Transport.Connection connection = searchTransportService.getConnection(clusterAlias, node); + searchTransportService.sendFreePITContexts(connection, entry.getValue(), groupedListener); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Delete PITs failed on node [{}]", node.getName()), e); + List deletePitInfos = new ArrayList<>(); + for (PitSearchContextIdForNode pitSearchContextIdForNode : entry.getValue()) { + deletePitInfos.add(new DeletePitInfo(false, pitSearchContextIdForNode.getPitId())); + } + groupedListener.onResponse(new DeletePitResponse(deletePitInfos)); + } + } + } + }, listener::onFailure); + } + + public 
GroupedActionListener getDeletePitGroupedListener(ActionListener listener, int size) { + return new GroupedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(final Collection responses) { + Map pitIdToSucceededMap = new HashMap<>(); + for (DeletePitResponse response : responses) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + if (!pitIdToSucceededMap.containsKey(deletePitInfo.getPitId())) { + pitIdToSucceededMap.put(deletePitInfo.getPitId(), deletePitInfo.isSuccessful()); + } + if (!deletePitInfo.isSuccessful()) { + logger.debug(() -> new ParameterizedMessage("Deleting PIT with ID {} failed ", deletePitInfo.getPitId())); + pitIdToSucceededMap.put(deletePitInfo.getPitId(), deletePitInfo.isSuccessful()); + } + } + } + List deletePitResults = new ArrayList<>(); + for (Map.Entry entry : pitIdToSucceededMap.entrySet()) { + deletePitResults.add(new DeletePitInfo(entry.getValue(), entry.getKey())); + } + DeletePitResponse deletePitResponse = new DeletePitResponse(deletePitResults); + listener.onResponse(deletePitResponse); + } + + @Override + public void onFailure(final Exception e) { + logger.error("Delete PITs failed", e); + listener.onFailure(e); + } + }, size); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index df655c39f845d..7f218a3b1a17e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -45,12 +45,12 @@ * * @opensearch.internal */ -final class SearchContextIdForNode implements Writeable { +public final class SearchContextIdForNode implements Writeable { private final String node; private final ShardSearchContextId searchContextId; private final String clusterAlias; - SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { + public SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; this.searchContextId = searchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index e667fdff53312..23515dd28b329 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -71,7 +71,9 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.BiFunction; @@ -87,6 +89,8 @@ public class SearchTransportService { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]"; + public static final String FREE_PIT_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context/pit]"; + public static final String FREE_ALL_PIT_CONTEXTS_ACTION_NAME = "indices:data/read/search[free_pit_contexts]"; public static final String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]"; public 
static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; @@ -200,6 +204,30 @@ public void sendClearAllScrollContexts(Transport.Connection connection, final Ac ); } + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + transportService.sendRequest( + connection, + FREE_PIT_CONTEXT_ACTION_NAME, + new PitFreeContextsRequest(contextIds), + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, DeletePitResponse::new) + ); + } + + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + transportService.sendRequest( + connection, + FREE_ALL_PIT_CONTEXTS_ACTION_NAME, + TransportRequest.Empty.INSTANCE, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, DeletePitResponse::new) + ); + } + public void sendExecuteDfs( Transport.Connection connection, final ShardSearchRequest request, @@ -370,6 +398,43 @@ public ShardSearchContextId id() { } + /** + * Request to free the PIT context based on id + */ + static class PitFreeContextsRequest extends TransportRequest { + private List contextIds; + + PitFreeContextsRequest(List contextIds) { + this.contextIds = new ArrayList<>(); + this.contextIds.addAll(contextIds); + } + + PitFreeContextsRequest(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + if (size > 0) { + this.contextIds = new ArrayList<>(); + for (int i = 0; i < size; i++) { + PitSearchContextIdForNode contextId = new PitSearchContextIdForNode(in); + contextIds.add(contextId); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(contextIds.size()); + for (PitSearchContextIdForNode contextId : contextIds) { + contextId.writeTo(out); + } + } + + public List getContextIds() { + return this.contextIds; + } + } + /** * A search free context request * @@ -454,6 +519,23 @@ public static void registerRequestHandler(TransportService transportService, Sea } ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, SearchFreeContextResponse::new); + + transportService.registerRequestHandler( + FREE_PIT_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + PitFreeContextsRequest::new, + (request, channel, task) -> { channel.sendResponse(searchService.freeReaderContextsIfFound(request.getContextIds())); } + ); + TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, DeletePitResponse::new); + + transportService.registerRequestHandler( + FREE_ALL_PIT_CONTEXTS_ACTION_NAME, + ThreadPool.Names.SAME, + TransportRequest.Empty::new, + (request, channel, task) -> { channel.sendResponse(searchService.freeAllPitContexts()); } + ); + TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, DeletePitResponse::new); + transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java new file mode 100644 index 0000000000000..d67979d1c87c5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require 
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.tasks.Task; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Transport action for deleting point in time searches - supports deleting list and all point in time searches + */ +public class TransportDeletePitAction extends HandledTransportAction { + private final NamedWriteableRegistry namedWriteableRegistry; + private TransportSearchAction transportSearchAction; + private final ClusterService clusterService; + private final SearchTransportService searchTransportService; + private final PitService pitService; + + @Inject + public TransportDeletePitAction( + TransportService transportService, + ActionFilters actionFilters, + NamedWriteableRegistry namedWriteableRegistry, + TransportSearchAction transportSearchAction, + ClusterService clusterService, + SearchTransportService searchTransportService, + PitService pitService + ) { + super(DeletePitAction.NAME, transportService, actionFilters, DeletePitRequest::new); + this.namedWriteableRegistry = namedWriteableRegistry; + this.transportSearchAction = transportSearchAction; + this.clusterService = clusterService; + this.searchTransportService = searchTransportService; + this.pitService = pitService; + } + + /** + * Invoke 'delete all pits' or 'delete list of pits' workflow based on request + */ + @Override + protected void doExecute(Task task, DeletePitRequest request, ActionListener listener) { + List pitIds = request.getPitIds(); + if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + deleteAllPits(listener); + } else { + deletePits(listener, request); + } + } + + /** + * Deletes one or more point in time search contexts. 
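+ * Context ids are grouped per node, so each data node receives a single free-context call. + * Minimal caller-side sketch, not part of this class (assumes a client, a logger, and an existing pit id): + * + * DeletePitRequest request = new DeletePitRequest(pitId); + * client.deletePits(request, ActionListener.wrap( + * response -> response.getDeletePitResults().forEach(info -> logger.info("pit {} deleted={}", info.getPitId(), info.isSuccessful())), + * e -> logger.error("delete pit failed", e) + * ));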
+ */ + private void deletePits(ActionListener listener, DeletePitRequest request) { + Map> nodeToContextsMap = new HashMap<>(); + for (String pitId : request.getPitIds()) { + SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, pitId); + for (SearchContextIdForNode contextIdForNode : contextId.shards().values()) { + PitSearchContextIdForNode pitSearchContext = new PitSearchContextIdForNode(pitId, contextIdForNode); + List contexts = nodeToContextsMap.getOrDefault(contextIdForNode.getNode(), new ArrayList<>()); + contexts.add(pitSearchContext); + nodeToContextsMap.put(contextIdForNode.getNode(), contexts); + } + } + pitService.deletePitContexts(nodeToContextsMap, listener); + } + + /** + * Delete all active PIT reader contexts + */ + private void deleteAllPits(ActionListener listener) { + // TODO: Use list all PITs to delete all PITs in case of remote cluster use case + int size = clusterService.state().getNodes().getSize(); + ActionListener groupedActionListener = pitService.getDeletePitGroupedListener(listener, size); + for (final DiscoveryNode node : clusterService.state().getNodes()) { + try { + Transport.Connection connection = searchTransportService.getConnection(null, node); + searchTransportService.sendFreeAllPitContexts(connection, groupedActionListener); + } catch (Exception e) { + groupedActionListener.onFailure(e); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index a73f8200ab277..1d3bbfcba43f9 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -60,6 +60,8 @@ import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -332,6 +334,11 @@ public interface Client extends OpenSearchClient, Releasable { */ void createPit(CreatePitRequest createPITRequest, ActionListener listener); + /** + * Delete one or more point in time contexts + */ + void deletePits(DeletePitRequest deletePITRequest, ActionListener listener); + /** * Performs multiple search requests. 
*/ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6cc0827310bd1..f99454a8a8913 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -327,6 +327,9 @@ import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; @@ -582,6 +585,11 @@ public void createPit(final CreatePitRequest createPITRequest, final ActionListe execute(CreatePitAction.INSTANCE, createPITRequest, listener); } + @Override + public void deletePits(final DeletePitRequest deletePITRequest, final ActionListener listener) { + execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + @Override public ActionFuture multiSearch(MultiSearchRequest request) { return execute(MultiSearchAction.INSTANCE, request); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 74527f26ead9e..747b13b031824 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -41,6 +41,9 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.OriginalIndices; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.PitSearchContextIdForNode; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; @@ -138,6 +141,7 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -1063,6 +1067,57 @@ public void freeAllScrollContexts() { } } + /** + * Free reader contexts if found + * @return response with list of PIT IDs deleted and if operation is successful + */ + public DeletePitResponse freeReaderContextsIfFound(List contextIds) { + List deleteResults = new ArrayList<>(); + for (PitSearchContextIdForNode contextId : contextIds) { + try { + if (getReaderContext(contextId.getSearchContextIdForNode().getSearchContextId()) != null) { + try (ReaderContext context = removeReaderContext(contextId.getSearchContextIdForNode().getSearchContextId().getId())) { + PitReaderContext pitReaderContext = (PitReaderContext) context; + if (context == null) { + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); + continue; + } + String pitId = pitReaderContext.getPitId(); + boolean success = context != null; + DeletePitInfo deletePitInfo = new DeletePitInfo(success, pitId); + deleteResults.add(deletePitInfo); + } + } else { + // For search context missing cases, mark the operation as succeeded + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); + } + 
} catch (SearchContextMissingException e) { + // For search context missing cases, mark the operation as succeeded + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); + } + } + return new DeletePitResponse(deleteResults); + } + + /** + * Free all active pit contexts + * @return response with list of PIT IDs deleted and if operation is successful + */ + public DeletePitResponse freeAllPitContexts() { + List deleteResults = new ArrayList<>(); + for (ReaderContext readerContext : activeReaders.values()) { + if (readerContext instanceof PitReaderContext) { + boolean result = freeReaderContext(readerContext.id()); + DeletePitInfo deletePitInfo = new DeletePitInfo(result, ((PitReaderContext) readerContext).getPitId()); + deleteResults.add(deletePitInfo); + } + } + return new DeletePitResponse(deleteResults); + } + private long getKeepAlive(ShardSearchRequest request) { if (request.scroll() != null) { return getScrollKeepAlive(request.scroll()); diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 09c5bd834f7d5..2c65d6ffcc813 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -39,6 +39,7 @@ import org.opensearch.transport.RemoteClusterConnectionTests; import org.opensearch.transport.Transport; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -161,6 +162,7 @@ public void onFailure(Exception e) { */ public void testUpdatePitAfterCreatePitSuccess() throws InterruptedException { List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); List knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); @@ -192,6 +194,20 @@ public void updatePitContext( t.start(); } + /** + * Test if cleanup request is called + */ + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + @Override public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { return new SearchAsyncActionTests.MockConnection(node); @@ -203,11 +219,13 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, transportSearchAction, - namedWriteableRegistry + namedWriteableRegistry, + pitService ); ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { @@ -227,6 +245,7 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); + assertEquals(0, deleteNodesInvoked.size()); } } } @@ -236,6 +255,7 @@ public void onFailure(Exception e) { */ public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { List 
updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); List knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); @@ -271,17 +291,30 @@ public void updatePitContext( public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { return new SearchAsyncActionTests.MockConnection(node); } + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } }; CountDownLatch latch = new CountDownLatch(1); CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, transportSearchAction, - namedWriteableRegistry + namedWriteableRegistry, + pitService ); ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { @@ -302,6 +335,10 @@ public void onFailure(Exception e) { createListener.onFailure(new Exception("Exception occurred in phase 1")); latch.await(); assertEquals(0, updateNodesInvoked.size()); + /** + * cleanup is not called on create pit phase one failure + */ + assertEquals(0, deleteNodesInvoked.size()); } } } @@ -311,6 +348,7 @@ public void onFailure(Exception e) { */ public void testUpdatePitFailureForNodeDrop() throws InterruptedException { List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); List knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); @@ -349,6 +387,17 @@ public void updatePitContext( } } + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + @Override public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { return new SearchAsyncActionTests.MockConnection(node); @@ -357,11 +406,13 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, transportSearchAction, - namedWriteableRegistry + namedWriteableRegistry, + pitService ); CountDownLatch latch = new CountDownLatch(1); @@ -383,12 +434,17 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); + /** + * check if cleanup is called for all nodes in case of update pit failure + */ + assertEquals(3, deleteNodesInvoked.size()); } } } public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException { List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new 
CopyOnWriteArrayList<>(); List knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); @@ -420,6 +476,17 @@ public void updatePitContext( t.start(); } + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + @Override public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { return new SearchAsyncActionTests.MockConnection(node); @@ -427,11 +494,13 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, transportSearchAction, - namedWriteableRegistry + namedWriteableRegistry, + pitService ); CountDownLatch latch = new CountDownLatch(1); @@ -453,8 +522,11 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); + /** + * check if cleanup is called for all nodes in case of update pit failure + */ + assertEquals(3, deleteNodesInvoked.size()); } } } - } diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java new file mode 100644 index 0000000000000..e2db4fdbc97ef --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -0,0 +1,638 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilter; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterConnectionTests; +import org.opensearch.transport.Transport; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.action.search.PitTestsUtil.getPitId; +import static org.opensearch.action.support.PlainActionFuture.newFuture; + +/** + * Functional tests for transport delete pit action + */ +public class TransportDeletePitActionTests extends OpenSearchTestCase { + DiscoveryNode node1 = null; + DiscoveryNode node2 = null; + DiscoveryNode node3 = null; + String pitId = null; + TransportSearchAction transportSearchAction = null; + Task task = null; + DiscoveryNodes nodes = null; + NamedWriteableRegistry namedWriteableRegistry = null; + ClusterService clusterServiceMock = null; + Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); + private ThreadPool threadPool = new ThreadPool(settings); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes, Version version) { + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings + ) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool, settings); + } + + @Before + public void setupData() { + node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + pitId = getPitId(); + namedWriteableRegistry 
= new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + ) + ); + nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + transportSearchAction = mock(TransportSearchAction.class); + task = new Task( + randomLong(), + "transport", + SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + InternalSearchResponse response = new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + null, + false, + null, + 1 + ); + + clusterServiceMock = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + + final Settings keepAliveSettings = Settings.builder().put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), 30000).build(); + when(clusterServiceMock.getSettings()).thenReturn(keepAliveSettings); + + when(state.getMetadata()).thenReturn(Metadata.EMPTY_METADATA); + when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA); + when(clusterServiceMock.state()).thenReturn(state); + when(state.getNodes()).thenReturn(nodes); + } + + /** + * Test if transport call for delete pit is made to all nodes present as part of PIT ID returned from phase one of create pit + */ + public void testDeletePitSuccess() throws InterruptedException, ExecutionException { + List<DiscoveryNode> deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List<PitSearchContextIdForNode> contextIds, + ActionListener<DeletePitResponse> listener + ) { + deleteNodesInvoked.add(connection.getNode()); + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List<DeletePitInfo> deletePitInfos = new ArrayList<>(); + deletePitInfos.add(deletePitInfo); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(deletePitInfos))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest
deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertTrue(dr.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertTrue(dr.getDeletePitResults().get(0).isSuccessful()); + assertEquals(3, deleteNodesInvoked.size()); + + } + } + } + + public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfos = new ArrayList<>(); + deletePitInfos.add(deletePitInfo); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(deletePitInfos))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertTrue(dr.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertTrue(dr.getDeletePitResults().get(0).isSuccessful()); + assertEquals(3, deleteNodesInvoked.size()); + + } + } + } + + public void testDeletePitWhenNodeIsDown() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + 
) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + + if (connection.getNode().getId().equals("node_3")) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeletePitWhenAllNodesAreDown() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); + assertEquals(3,
deleteNodesInvoked.size()); + } + } + } + + public void testDeletePitFailure() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + + if (connection.getNode().getId().equals("node_3")) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitWhenNodeIsDown() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId().equals("node_3")) { + Thread t = new
Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitWhenAllNodesAreDown() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitFailure() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService 
cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId().equals("node_3")) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 is down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("java.lang.Exception: node 3 is down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java index 7eb7a62348c5f..27d8f27add898 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java @@ -11,17 +11,31 @@ import org.junit.After; import org.junit.Before; import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.containsString; import static
org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -199,4 +213,103 @@ public void testPitInvalidDefaultKeepAlive() { .setTransientSettings(Settings.builder().putNull("*")) ); } + + public void testConcurrentCreates() throws InterruptedException { + CreatePitRequest createPitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + + int concurrentRuns = randomIntBetween(20, 50); + AtomicInteger numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(DeletePitMultiNodeTests.class.getName()); + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + Set createSet = new HashSet<>(); + for (int i = 0; i < concurrentRuns; i++) { + Runnable thread = () -> { + logger.info("Triggering pit create --->"); + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPitResponse) { + if (createSet.add(createPitResponse.getId())) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(CreatePitAction.INSTANCE, createPitRequest, listener); + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + + public void testConcurrentCreatesWithDeletes() throws InterruptedException, ExecutionException { + CreatePitRequest createPitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + List pitIds = new ArrayList<>(); + String id = client().execute(CreatePitAction.INSTANCE, createPitRequest).get().getId(); + pitIds.add(id); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + Set createSet = new HashSet<>(); + AtomicInteger numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(CreatePitMultiNodeTests.class.getName()); + int concurrentRuns = randomIntBetween(20, 50); + + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + long randomDeleteThread = randomLongBetween(0, concurrentRuns - 1); + for (int i = 0; i < concurrentRuns; i++) { + int currentThreadIteration = i; + Runnable thread = () -> { + if (currentThreadIteration == randomDeleteThread) { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPitResponse) { + if (createSet.add(createPitResponse.getId())) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(CreatePitAction.INSTANCE, createPitRequest, listener); + } else { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + 
client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } } diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java new file mode 100644 index 0000000000000..89607b9201cd9 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -0,0 +1,333 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; + +/** + * Multi node integration tests for delete PIT use cases + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class DeletePitMultiNodeTests extends OpenSearchIntegTestCase { + + @Before + public void setupIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + } + + @After + public void clearIndex() { + client().admin().indices().prepareDelete("index").get(); + } + + private CreatePitResponse createPitOnIndex(String index) throws ExecutionException, InterruptedException { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { index }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + return execute.get(); + } + + public void testDeletePit() throws 
Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + execute = client().execute(CreatePitAction.INSTANCE, request); + pitResponse = execute.get(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = deleteExecute.get(); + assertEquals(2, deletePITResponse.getDeletePitResults().size()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + /** + * Deleting the same PIT ids again should also report success + */ + deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + deletePITResponse = deleteExecute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + } + + public void testDeletePitWithValidAndDeletedIds() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + + /** + * Delete PIT #1 + */ + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = deleteExecute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + execute = client().execute(CreatePitAction.INSTANCE, request); + pitResponse = execute.get(); + pitIds.add(pitResponse.getId()); + /** + * Delete PIT with both ids #1 (which is already deleted) and #2 (which is present) + */ + deletePITRequest = new DeletePitRequest(pitIds); + deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + deletePITResponse = deleteExecute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + } + + public void testDeletePitWithValidAndInvalidIds() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + pitIds.add("nondecodableid"); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + Exception e = assertThrows(ExecutionException.class, () -> deleteExecute.get()); + assertThat(e.getMessage(), containsString("invalid id")); + } + + public void testDeleteAllPits() throws Exception
{ + createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + createPitOnIndex("index1"); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * Delete all PITs and assert that the response reports success for every PIT id + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeletePitWhileNodeDrop() throws Exception { + CreatePitResponse pitResponse = createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + CreatePitResponse pitResponse1 = createPitOnIndex("index1"); + pitIds.add(pitResponse1.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + try { + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertFalse(deletePitInfo.isSuccessful()); + } + } catch (Exception e) { + throw new AssertionError(e); + } + return super.onNodeStopped(nodeName); + } + }); + + ensureGreen(); + /** + * When we invoke delete again, it returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures (as deletion in one node is successful) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeleteAllPitsWhileNodeDrop() throws Exception { + createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + try { + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertFalse(deletePitInfo.isSuccessful()); + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Node not connected")); + } + return super.onNodeStopped(nodeName); + } + }); + + ensureGreen(); + /** + * When we invoke delete again, it returns success after clearing the remaining readers. Asserting reader context + * not found exceptions don't result in failures (as deletion in one node is successful) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeleteWhileSearch() throws Exception { + CreatePitResponse pitResponse = createPitOnIndex("index"); + ensureGreen(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + Thread[] threads = new Thread[5]; + CountDownLatch latch = new CountDownLatch(threads.length); + final AtomicBoolean deleted = new AtomicBoolean(false); + + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (int j = 0; j < 30; j++) { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .execute() + .get(); + } + } catch (Exception e) { + /** + * Assert on the exception once the delete PIT call goes through; throw an error for any exception before that. 
+ */ + if (deleted.get()) { + if (!e.getMessage().contains("all shards failed")) throw new AssertionError(e); + return; + } + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + deleted.set(true); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + + for (Thread thread : threads) { + thread.join(); + } + } + + public void testConcurrentDeletes() throws InterruptedException, ExecutionException { + CreatePitResponse pitResponse = createPitOnIndex("index"); + ensureGreen(); + int concurrentRuns = randomIntBetween(20, 50); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + AtomicInteger numDeleteAcknowledged = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(DeletePitMultiNodeTests.class.getName()); + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + for (int i = 0; i < concurrentRuns; i++) { + Runnable thread = () -> { + logger.info("Triggering pit delete --->"); + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numDeleteAcknowledged.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numDeleteAcknowledged.get()); + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + +} diff --git a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java new file mode 100644 index 0000000000000..5944e2a35b14a --- /dev/null +++ b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; + +public class DeletePitResponseTests extends OpenSearchTestCase { + + public void testDeletePitResponseToXContent() throws IOException { + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfoList = new ArrayList<>(); + deletePitInfoList.add(deletePitInfo); + DeletePitResponse deletePitResponse = new DeletePitResponse(deletePitInfoList); + + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + deletePitResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + assertEquals("pitId", deletePitResponse.getDeletePitResults().get(0).getPitId()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + } + + public void testDeletePitResponseToAndFromXContent() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + DeletePitResponse originalResponse = createDeletePitResponseTestItem(); + BytesReference originalBytes = toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + DeletePitResponse parsedResponse; + try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + parsedResponse = DeletePitResponse.fromXContent(parser); + } + assertEquals( + originalResponse.getDeletePitResults().get(0).isSuccessful(), + parsedResponse.getDeletePitResults().get(0).isSuccessful() + ); + assertEquals(originalResponse.getDeletePitResults().get(0).getPitId(), parsedResponse.getDeletePitResults().get(0).getPitId()); + BytesReference parsedBytes = XContentHelper.toXContent(parsedResponse, xContentType, randomBoolean()); + assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + } + + private static DeletePitResponse createDeletePitResponseTestItem() { + DeletePitInfo deletePitInfo = new DeletePitInfo(randomBoolean(), "pitId"); + List deletePitInfoList = new ArrayList<>(); + deletePitInfoList.add(deletePitInfo); + return new DeletePitResponse(deletePitInfoList); + } +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 08a2ee155ccd4..ecc28470b0eb2 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -41,6 +41,9 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.PitSearchContextIdForNode; +import org.opensearch.action.search.SearchContextIdForNode; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequest; import
org.opensearch.action.search.SearchResponse; @@ -1425,6 +1428,42 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard ); } + public void testDeletePitReaderContext() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + List contextIds = new ArrayList<>(); + ShardSearchContextId shardSearchContextId = future.actionGet(); + PitSearchContextIdForNode pitSearchContextIdForNode = new PitSearchContextIdForNode( + "1", + new SearchContextIdForNode(null, "node1", shardSearchContextId) + ); + contextIds.add(pitSearchContextIdForNode); + + assertThat(searchService.getActiveContexts(), equalTo(1)); + DeletePitResponse deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + // assert true for reader context not found + deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + // adding this assert to showcase behavior difference + assertFalse(searchService.freeReaderContext(future.actionGet())); + } + + public void testDeleteAllPitReaderContexts() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + assertThat(searchService.getActiveContexts(), equalTo(2)); + searchService.freeAllPitContexts(); + assertThat(searchService.getActiveContexts(), equalTo(0)); + } + public void testPitContextMaxKeepAlive() { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); From 7694a27ea380c866fba0fe9b36a19308cd125378 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 26 Jul 2022 11:51:10 -0400 Subject: [PATCH 038/104] Fixing flaky org.opensearch.cluster.routing.allocation.decider.DiskThresholdDeciderIT.testHighWatermarkNotExceeded test case (#4012) Signed-off-by: Andriy Redko --- .../routing/allocation/decider/DiskThresholdDeciderIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index fcfcec3445a80..10e809e2fb5dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -59,6 +59,7 @@ import org.opensearch.core.internal.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; import org.opensearch.monitor.fs.FsService; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.fs.FsRepository; @@ -173,6 +174,7 @@ public void testHighWatermarkNotExceeded() throws Exception { 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6) .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms") + .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false) .build() ); final long minShardSize = createReasonableSizedShards(indexName); From 00db112f32d947e758d7d5f6e4a6c7444a22a7c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Jul 2022 17:09:37 -0500 Subject: [PATCH 039/104] Bump woodstox-core from 6.2.8 to 6.3.0 in /plugins/repository-hdfs (#3841) Bumps [woodstox-core](https://github.com/FasterXML/woodstox) from 6.2.8 to 6.3.0. - [Release notes](https://github.com/FasterXML/woodstox/releases) - [Commits](https://github.com/FasterXML/woodstox/compare/woodstox-core-6.2.8...woodstox-core-6.3.0) --- updated-dependencies: - dependency-name: com.fasterxml.woodstox:woodstox-core dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 | 1 - plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 49fdca4ab9c53..d1658279bfe48 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -83,7 +83,7 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.8.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.2.8' + implementation 'com.fasterxml.woodstox:woodstox-core:6.3.0' implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 deleted file mode 100644 index ae65cdebf26de..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.2.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -670748292899c53b1963730d9eb7f8ab71314e90 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 new file mode 100644 index 0000000000000..ebd85df98b39e --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 @@ -0,0 +1 @@ +03c1df4164b107ee22ad4f24bd453ec78a0efd95 \ No newline at end of file From 85bfde171180e1e6ff02074dcac5dfad1c804efc Mon Sep 17 00:00:00 2001 From: Hauck <67768441+hauck-jvsh@users.noreply.github.com> Date: Thu, 28 Jul 2022 16:12:48 -0300 Subject: [PATCH 040/104] Fix the version tests (#4035) Signed-off-by: Hauck --- .../test/search.highlight/30_max_analyzed_offset.yml | 4 ++-- .../fetch/subphase/highlight/AbstractHighlighterBuilder.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index c38afc96590e9..a18ac45e62175 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -39,8 +39,8 @@ setup: --- "Unified highlighter on a field WITHOUT OFFSETS using max_analyzer_offset should SUCCEED": - skip: - version: " - 2.99.99" - reason: only starting supporting the parameter max_analyzer_offset on version 3.0 + version: " - 2.1.99" + reason: only starting supporting the parameter max_analyzer_offset on version 2.2 - do: search: rest_total_hits_as_int: true diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index db0089fd5f180..94533a5e3dae6 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -189,7 +189,7 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException { requireFieldMatch(in.readOptionalBoolean()); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_2_0)) { maxAnalyzerOffset(in.readOptionalVInt()); } @@ -234,7 +234,7 @@ public final void writeTo(StreamOutput out) throws IOException { out.writeMap(options); } out.writeOptionalBoolean(requireFieldMatch); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { out.writeOptionalVInt(maxAnalyzerOffset); } doWriteTo(out); From fb7d81a7c00df14223e34511a6b1b5a3eff44e65 Mon Sep 17 00:00:00 2001 From: Petar Dzepina Date: Thu, 28 Jul 2022 21:30:28 +0200 Subject: [PATCH 041/104] Add doc_count field mapper (#3985) Bucket aggregations compute bucket doc_count values by incrementing the doc_count by 1 for every document collected in the bucket. When using summary fields (such as aggregate_metric_double) one field may represent more than one document. To provide this functionality this commit implements a new field mapper (named doc_count field mapper). This field is a positive integer representing the number of documents aggregated in a single summary field. Bucket aggregations check if a field of type doc_count exists in a document and take this value into consideration when computing doc counts. Note: This originated from upstream PR 64503. 
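For illustration only (a sketch, not part of the change): indexing a pre-aggregated document and aggregating over it would behave as follows. The client() helper and the field names mirror the integration-test fixtures added below; treat the exact calls as assumptions rather than API documentation.

    // Index one summary document that stands in for 10 raw documents.
    // _doc_count is parsed from the document source by the new field mapper.
    client().prepareIndex("test_1")
        .setSource("_doc_count", 10, "str", "abc", "number", 500)
        .setRefreshPolicy(IMMEDIATE)
        .get();
    // A terms aggregation on "str" now reports doc_count = 10 for the "abc"
    // bucket, because bucket collectors increment by the _doc_count value
    // instead of by a constant 1.
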
Signed-off-by: Petar Dzepina --- .../380_doc_count_field.yml | 146 ++++++++++++++++ .../index/mapper/DocCountFieldMapper.java | 164 ++++++++++++++++++ .../org/opensearch/indices/IndicesModule.java | 2 + .../search/aggregations/AggregatorBase.java | 4 +- .../bucket/BucketsAggregator.java | 37 ++-- .../aggregations/bucket/DocCountProvider.java | 62 +++++++ .../adjacency/AdjacencyMatrixAggregator.java | 4 +- .../bucket/composite/CompositeAggregator.java | 5 +- .../CompositeValuesCollectorQueue.java | 22 +-- .../bucket/composite/SortedDocsProducer.java | 6 +- .../bucket/nested/NestedAggregator.java | 3 +- .../GlobalOrdinalsStringTermsAggregator.java | 13 +- .../mapper/DocCountFieldMapperTests.java | 83 +++++++++ .../index/mapper/DocCountFieldTypeTests.java | 69 ++++++++ .../indices/IndicesModuleTests.java | 2 + .../bucket/DocCountProviderTests.java | 107 ++++++++++++ .../CompositeValuesCollectorQueueTests.java | 2 +- 17 files changed, 692 insertions(+), 39 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/380_doc_count_field.yml create mode 100644 server/src/main/java/org/opensearch/index/mapper/DocCountFieldMapper.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/DocCountProvider.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DocCountFieldMapperTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DocCountFieldTypeTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/bucket/DocCountProviderTests.java diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/380_doc_count_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/380_doc_count_field.yml new file mode 100644 index 0000000000000..a7a796eceeb39 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/380_doc_count_field.yml @@ -0,0 +1,146 @@ +setup: + - do: + indices.create: + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + properties: + str: + type: keyword + number: + type: integer + + - do: + bulk: + index: test_1 + refresh: true + body: + - '{"index": {}}' + - '{"_doc_count": 10, "str": "abc", "number" : 500, "unmapped": "abc" }' + - '{"index": {}}' + - '{"_doc_count": 5, "str": "xyz", "number" : 100, "unmapped": "xyz" }' + - '{"index": {}}' + - '{"_doc_count": 7, "str": "foo", "number" : 100, "unmapped": "foo" }' + - '{"index": {}}' + - '{"_doc_count": 1, "str": "foo", "number" : 200, "unmapped": "foo" }' + - '{"index": {}}' + - '{"str": "abc", "number" : 500, "unmapped": "abc" }' + +--- +"Test numeric terms agg with doc_count": + - skip: + version: "1.0.0 - " + reason: "until this is released and we know exact version" + + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { "num_terms" : { "terms" : { "field" : "number" } } } } + + - match: { hits.total: 5 } + - length: { aggregations.num_terms.buckets: 3 } + - match: { aggregations.num_terms.buckets.0.key: 100 } + - match: { aggregations.num_terms.buckets.0.doc_count: 12 } + - match: { aggregations.num_terms.buckets.1.key: 500 } + - match: { aggregations.num_terms.buckets.1.doc_count: 11 } + - match: { aggregations.num_terms.buckets.2.key: 200 } + - match: { aggregations.num_terms.buckets.2.doc_count: 1 } + +--- +"Test keyword terms agg with doc_count": + - skip: + version: "1.0.0 - " + reason: "until this is released and we know exact version" + - do: + search: + 
rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str" } } } } + + - match: { hits.total: 5 } + - length: { aggregations.str_terms.buckets: 3 } + - match: { aggregations.str_terms.buckets.0.key: "abc" } + - match: { aggregations.str_terms.buckets.0.doc_count: 11 } + - match: { aggregations.str_terms.buckets.1.key: "foo" } + - match: { aggregations.str_terms.buckets.1.doc_count: 8 } + - match: { aggregations.str_terms.buckets.2.key: "xyz" } + - match: { aggregations.str_terms.buckets.2.doc_count: 5 } + +--- +"Test unmapped string terms agg with doc_count": + - skip: + version: "1.0.0 - " + reason: "until this is released and we know exact version" + - do: + bulk: + index: test_2 + refresh: true + body: + - '{"index": {}}' + - '{"_doc_count": 10, "str": "abc" }' + - '{"index": {}}' + - '{"str": "abc" }' + - do: + search: + index: test_2 + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str.keyword" } } } } + + - match: { hits.total: 2 } + - length: { aggregations.str_terms.buckets: 1 } + - match: { aggregations.str_terms.buckets.0.key: "abc" } + - match: { aggregations.str_terms.buckets.0.doc_count: 11 } + +--- +"Test composite str_terms agg with doc_count": + - skip: + version: "1.0.0 - " + reason: "until this is released and we know exact version" + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : + { "composite_agg" : { "composite" : + { + "sources": ["str_terms": { "terms": { "field": "str" } }] + } + } + } + } + + - match: { hits.total: 5 } + - length: { aggregations.composite_agg.buckets: 3 } + - match: { aggregations.composite_agg.buckets.0.key.str_terms: "abc" } + - match: { aggregations.composite_agg.buckets.0.doc_count: 11 } + - match: { aggregations.composite_agg.buckets.1.key.str_terms: "foo" } + - match: { aggregations.composite_agg.buckets.1.doc_count: 8 } + - match: { aggregations.composite_agg.buckets.2.key.str_terms: "xyz" } + - match: { aggregations.composite_agg.buckets.2.doc_count: 5 } + +--- +"Test composite num_terms agg with doc_count": + - skip: + version: "1.0.0 - " + reason: "until this is released and we know exact version" + - do: + search: + rest_total_hits_as_int: true + body: { "size" : 0, "aggs" : + { "composite_agg" : + { "composite" : + { + "sources": ["num_terms" : { "terms" : { "field" : "number" } }] + } + } + } + } + + - match: { hits.total: 5 } + - length: { aggregations.composite_agg.buckets: 3 } + - match: { aggregations.composite_agg.buckets.0.key.num_terms: 100 } + - match: { aggregations.composite_agg.buckets.0.doc_count: 12 } + - match: { aggregations.composite_agg.buckets.1.key.num_terms: 200 } + - match: { aggregations.composite_agg.buckets.1.doc_count: 1 } + - match: { aggregations.composite_agg.buckets.2.key.num_terms: 500 } + - match: { aggregations.composite_agg.buckets.2.doc_count: 11 } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocCountFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocCountFieldMapper.java new file mode 100644 index 0000000000000..e3f6f5ef130e9 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DocCountFieldMapper.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ +package org.opensearch.index.mapper; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.Query; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentParserUtils; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * Mapper for the doc_count field. + * + * @opensearch.internal + */ +public class DocCountFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_doc_count"; + public static final String CONTENT_TYPE = "_doc_count"; + + public static final TypeParser PARSER = new ConfigurableTypeParser( + c -> new DocCountFieldMapper(), + c -> new DocCountFieldMapper.Builder() + ); + + static class Builder extends MetadataFieldMapper.Builder { + + Builder() { + super(NAME); + } + + @Override + protected List> getParameters() { + return Collections.emptyList(); + } + + @Override + public DocCountFieldMapper build(BuilderContext context) { + return new DocCountFieldMapper(); + } + } + + /** + * Field type for DocCount Field Mapper + * + * @opensearch.internal + */ + public static final class DocCountFieldType extends MappedFieldType { + + public static final DocCountFieldType INSTANCE = new DocCountFieldType(); + + private static final Long defaultValue = 1L; + + public DocCountFieldType() { + super(NAME, false, false, true, TextSearchInfo.NONE, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public String familyTypeName() { + return NumberFieldMapper.NumberType.LONG.typeName(); + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new DocValuesFieldExistsQuery(NAME); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new QueryShardException(context, "Field [" + name() + "] of type [" + typeName() + "] is not searchable"); + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + + return new SourceValueFetcher(name(), context, defaultValue) { + @Override + protected Object parseSourceValue(Object value) { + if ("".equals(value)) { + return defaultValue; + } else { + return NumberFieldMapper.NumberType.objectToLong(value, false); + } + } 
+ }; + } + } + + private DocCountFieldMapper() { + super(DocCountFieldType.INSTANCE); + } + + @Override + protected void parseCreateField(ParseContext context) throws IOException { + XContentParser parser = context.parser(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, parser.currentToken(), parser); + + long value = parser.longValue(false); + if (value <= 0) { + throw new IllegalArgumentException("Field [" + fieldType().name() + "] must be a positive integer."); + } + final Field docCount = new NumericDocValuesField(NAME, value); + context.doc().add(docCount); + } + + @Override + public void preParse(ParseContext context) {} + + @Override + public DocCountFieldType fieldType() { + return (DocCountFieldType) super.fieldType(); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index 29ff507ad9fcf..e413a73940011 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -48,6 +48,7 @@ import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.FieldAliasMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; import org.opensearch.index.mapper.GeoPointFieldMapper; @@ -192,6 +193,7 @@ private static Map initBuiltInMetadataMa builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); builtInMetadataMappers.put(VersionFieldMapper.NAME, VersionFieldMapper.PARSER); builtInMetadataMappers.put(SeqNoFieldMapper.NAME, SeqNoFieldMapper.PARSER); + builtInMetadataMappers.put(DocCountFieldMapper.NAME, DocCountFieldMapper.PARSER); // _field_names must be added last so that it has a chance to see all the other mappers builtInMetadataMappers.put(FieldNamesFieldMapper.NAME, FieldNamesFieldMapper.PARSER); return Collections.unmodifiableMap(builtInMetadataMappers); diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java index db51f00056f72..1d315980512b4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java @@ -200,7 +200,7 @@ public Map metadata() { @Override public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - preGetSubLeafCollectors(); + preGetSubLeafCollectors(ctx); final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); return getLeafCollector(ctx, sub); } @@ -209,7 +209,7 @@ public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws * Can be overridden by aggregator implementations that like the perform an operation before the leaf collectors * of children aggregators are instantiated for the next segment. */ - protected void preGetSubLeafCollectors() throws IOException {} + protected void preGetSubLeafCollectors(LeafReaderContext ctx) throws IOException {} /** * Can be overridden by aggregator implementation to be called back when the collection phase starts. 
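The BucketsAggregator changes below replace the constant per-document increment with docCountProvider.getDocCount(doc). The provider's implementation (DocCountProvider.java) is added by this patch but not reproduced in this excerpt; a minimal sketch of the contract implied by the call sites, assuming the value is stored as a numeric doc value under "_doc_count" and defaults to 1 when absent (method names other than getDocCount are assumptions), might look like:

    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.NumericDocValues;

    // Sketch only: per-segment lookup of the _doc_count doc value.
    public class DocCountProviderSketch {
        private NumericDocValues docCountValues;

        // Re-bound once per segment, e.g. from preGetSubLeafCollectors(LeafReaderContext ctx).
        public void setLeafReaderContext(LeafReaderContext ctx) throws IOException {
            // Returns null when no document in this segment carries the field.
            docCountValues = ctx.reader().getNumericDocValues("_doc_count");
        }

        // A document without a _doc_count field counts as a single document.
        public long getDocCount(int doc) throws IOException {
            if (docCountValues == null || docCountValues.advanceExact(doc) == false) {
                return 1L;
            }
            return docCountValues.longValue();
        }
    }

Keeping the lookup on doc values (rather than re-parsing _source) keeps the per-document cost of bucket collection close to the previous constant-increment path.
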
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index 79512586d06de..67af0b13eed3b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -31,9 +31,10 @@ package org.opensearch.search.aggregations.bucket; +import org.apache.lucene.index.LeafReaderContext; import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.IntArray; +import org.opensearch.common.util.LongArray; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorBase; @@ -70,7 +71,8 @@ public abstract class BucketsAggregator extends AggregatorBase { private final BigArrays bigArrays; private final IntConsumer multiBucketConsumer; - private IntArray docCounts; + private LongArray docCounts; + protected final DocCountProvider docCountProvider; public BucketsAggregator( String name, @@ -87,7 +89,8 @@ public BucketsAggregator( } else { multiBucketConsumer = (count) -> {}; } - docCounts = bigArrays.newIntArray(1, true); + docCounts = bigArrays.newLongArray(1, true); + docCountProvider = new DocCountProvider(); } /** @@ -116,7 +119,8 @@ public final void collectBucket(LeafBucketCollector subCollector, int doc, long * Same as {@link #collectBucket(LeafBucketCollector, int, long)}, but doesn't check if the docCounts needs to be re-sized. */ public final void collectExistingBucket(LeafBucketCollector subCollector, int doc, long bucketOrd) throws IOException { - if (docCounts.increment(bucketOrd, 1) == 1) { + long docCount = docCountProvider.getDocCount(doc); + if (docCounts.increment(bucketOrd, docCount) == docCount) { // We calculate the final number of buckets only during the reduce phase. But we still need to // trigger bucket consumer from time to time in order to give it a chance to check available memory and break // the execution if we are running out. To achieve that we are passing 0 as a bucket count. @@ -147,11 +151,11 @@ public final void mergeBuckets(long[] mergeMap, long newNumBuckets) { * merge the actual ordinals and doc ID deltas. 
*/ public final void mergeBuckets(long newNumBuckets, LongUnaryOperator mergeMap) { - try (IntArray oldDocCounts = docCounts) { - docCounts = bigArrays.newIntArray(newNumBuckets, true); + try (LongArray oldDocCounts = docCounts) { + docCounts = bigArrays.newLongArray(newNumBuckets, true); docCounts.fill(0, newNumBuckets, 0); for (long i = 0; i < oldDocCounts.size(); i++) { - int docCount = oldDocCounts.get(i); + long docCount = oldDocCounts.get(i); if (docCount == 0) continue; @@ -164,14 +168,14 @@ public final void mergeBuckets(long newNumBuckets, LongUnaryOperator mergeMap) { } } - public IntArray getDocCounts() { + public LongArray getDocCounts() { return docCounts; } /** * Utility method to increment the doc counts of the given bucket (identified by the bucket ordinal) */ - public final void incrementBucketDocCount(long bucketOrd, int inc) { + public final void incrementBucketDocCount(long bucketOrd, long inc) { docCounts = bigArrays.grow(docCounts, bucketOrd + 1); docCounts.increment(bucketOrd, inc); } @@ -179,7 +183,7 @@ public final void incrementBucketDocCount(long bucketOrd, int inc) { /** * Utility method to return the number of documents that fell in the given bucket (identified by the bucket ordinal) */ - public final int bucketDocCount(long bucketOrd) { + public final long bucketDocCount(long bucketOrd) { if (bucketOrd >= docCounts.size()) { // This may happen eg. if no document in the highest buckets is accepted by a sub aggregator. // For example, if there is a long terms agg on 3 terms 1,2,3 with a sub filter aggregator and if no document with 3 as a value @@ -337,7 +341,7 @@ protected final InternalAggregation[] buildAggregationsForFixedBucketCount( */ @FunctionalInterface protected interface BucketBuilderForFixedCount { - B build(int offsetInOwningOrd, int docCount, InternalAggregations subAggregationResults); + B build(int offsetInOwningOrd, long docCount, InternalAggregations subAggregationResults); } /** @@ -432,7 +436,7 @@ protected final InternalAggregation[] buildAggregationsForVariableBuckets( */ @FunctionalInterface protected interface BucketBuilderForVariable { - B build(long bucketValue, int docCount, InternalAggregations subAggregationResults); + B build(long bucketValue, long docCount, InternalAggregations subAggregationResults); } /** @@ -466,7 +470,7 @@ public BucketComparator bucketComparator(String key, SortOrder order) { return super.bucketComparator(key, order); } if (key == null || "doc_count".equals(key)) { - return (lhs, rhs) -> order.reverseMul() * Integer.compare(bucketDocCount(lhs), bucketDocCount(rhs)); + return (lhs, rhs) -> order.reverseMul() * Long.compare(bucketDocCount(lhs), bucketDocCount(rhs)); } throw new IllegalArgumentException( "Ordering on a single-bucket aggregation can only be done on its doc_count. 
" @@ -478,6 +482,13 @@ public BucketComparator bucketComparator(String key, SortOrder order) { ); } + @Override + protected void preGetSubLeafCollectors(LeafReaderContext ctx) throws IOException { + super.preGetSubLeafCollectors(ctx); + // Set LeafReaderContext to the doc_count provider + docCountProvider.setLeafReaderContext(ctx); + } + public static boolean descendsFromGlobalAggregator(Aggregator parent) { while (parent != null) { if (parent.getClass() == GlobalAggregator.class) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/DocCountProvider.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/DocCountProvider.java new file mode 100644 index 0000000000000..30fe16ec4eb0e --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/DocCountProvider.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.search.aggregations.bucket; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.opensearch.index.mapper.DocCountFieldMapper; + +import java.io.IOException; + +/** + * An implementation of a doc_count provider that reads the value + * of the _doc_count field in the document. If a document does not have a + * _doc_count field the implementation will return 1 as the default value. 
+ */ +public class DocCountProvider { + + private NumericDocValues docCountValues; + + public long getDocCount(int doc) throws IOException { + if (docCountValues != null && docCountValues.advanceExact(doc)) { + return docCountValues.longValue(); + } else { + return 1L; + } + } + + public void setLeafReaderContext(LeafReaderContext ctx) throws IOException { + docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index c33887878dbd6..c9efbf7d34348 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -228,7 +228,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I List buckets = new ArrayList<>(filters.length); for (int i = 0; i < keys.length; i++) { long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], i); - int docCount = bucketDocCount(bucketOrd); + long docCount = bucketDocCount(bucketOrd); // Empty buckets are not returned because this aggregation will commonly be used under a // a date-histogram where we will look for transactions over time and can expect many // empty buckets. @@ -245,7 +245,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I for (int i = 0; i < keys.length; i++) { for (int j = i + 1; j < keys.length; j++) { long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], pos); - int docCount = bucketDocCount(bucketOrd); + long docCount = bucketDocCount(bucketOrd); // Empty buckets are not returned due to potential for very sparse matrices if (docCount > 0) { String intersectKey = keys[i] + separator + keys[j]; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index a907d17a731fe..8b487fc499602 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -196,7 +196,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I int slot = queue.pop(); CompositeKey key = queue.toCompositeKey(slot); InternalAggregations aggs = subAggsForBuckets[slot]; - int docCount = queue.getDocCount(slot); + long docCount = queue.getDocCount(slot); buckets[queue.size()] = new InternalComposite.InternalBucket( sourceNames, formats, @@ -504,7 +504,8 @@ private LeafBucketCollector getFirstPassCollector(RoaringDocIdSet.Builder builde @Override public void collect(int doc, long bucket) throws IOException { try { - if (queue.addIfCompetitive(indexSortPrefix)) { + long docCount = docCountProvider.getDocCount(doc); + if (queue.addIfCompetitive(indexSortPrefix, docCount)) { if (builder != null && lastDoc != doc) { builder.add(doc); lastDoc = doc; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index ac50869b5d79b..6ee1682a7b196 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -38,7 +38,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.IntArray; +import org.opensearch.common.util.LongArray; import org.opensearch.search.aggregations.LeafBucketCollector; import java.io.IOException; @@ -80,7 +80,7 @@ public int hashCode() { private final Map map; private final SingleDimensionValuesSource[] arrays; - private IntArray docCounts; + private LongArray docCounts; private boolean afterKeyIsSet = false; /** @@ -103,7 +103,7 @@ public int hashCode() { sources[i].setAfter(afterKey.get(i)); } } - this.docCounts = bigArrays.newIntArray(1, false); + this.docCounts = bigArrays.newLongArray(1, false); } @Override @@ -143,19 +143,19 @@ Comparable getUpperValueLeadSource() throws IOException { /** * Returns the document count in slot. */ - int getDocCount(int slot) { + long getDocCount(int slot) { return docCounts.get(slot); } /** * Copies the current value in slot. */ - private void copyCurrent(int slot) { + private void copyCurrent(int slot, long value) { for (int i = 0; i < arrays.length; i++) { arrays[i].copyCurrent(slot); } docCounts = bigArrays.grow(docCounts, slot + 1); - docCounts.set(slot, 1); + docCounts.set(slot, value); } /** @@ -265,8 +265,8 @@ LeafBucketCollector getLeafCollector(Comparable forceLeadSourceValue, LeafReader * Check if the current candidate should be added in the queue. * @return true if the candidate is competitive (added or already in the queue). */ - boolean addIfCompetitive() { - return addIfCompetitive(0); + boolean addIfCompetitive(long inc) { + return addIfCompetitive(0, inc); } /** @@ -279,12 +279,12 @@ boolean addIfCompetitive() { * * @throws CollectionTerminatedException if the current collection can be terminated early due to index sorting. 
*/ - boolean addIfCompetitive(int indexSortSourcePrefix) { + boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { // checks if the candidate key is competitive Integer topSlot = compareCurrent(); if (topSlot != null) { // this key is already in the top N, skip it - docCounts.increment(topSlot, 1); + docCounts.increment(topSlot, inc); return true; } if (afterKeyIsSet) { @@ -325,7 +325,7 @@ boolean addIfCompetitive(int indexSortSourcePrefix) { newSlot = size(); } // move the candidate key to its new slot - copyCurrent(newSlot); + copyCurrent(newSlot, inc); map.put(new Slot(newSlot), newSlot); add(newSlot); return true; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java index 3cdbee6b119ec..bd0a4f13ddf08 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -40,6 +40,7 @@ import org.apache.lucene.util.DocIdSetBuilder; import org.opensearch.common.Nullable; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.bucket.DocCountProvider; import java.io.IOException; @@ -74,6 +75,8 @@ protected boolean processBucket( ) throws IOException { final int[] topCompositeCollected = new int[1]; final boolean[] hasCollected = new boolean[1]; + final DocCountProvider docCountProvider = new DocCountProvider(); + docCountProvider.setLeafReaderContext(context); final LeafBucketCollector queueCollector = new LeafBucketCollector() { int lastDoc = -1; @@ -86,7 +89,8 @@ protected boolean processBucket( @Override public void collect(int doc, long bucket) throws IOException { hasCollected[0] = true; - if (queue.addIfCompetitive()) { + long docCount = docCountProvider.getDocCount(doc); + if (queue.addIfCompetitive(docCount)) { topCompositeCollected[0]++; if (adder != null && doc != lastDoc) { if (remainingBits == 0) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java index 5dc4d1c780350..02507f8569ffa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -130,7 +130,8 @@ public void collect(int parentDoc, long bucket) throws IOException { } @Override - protected void preGetSubLeafCollectors() throws IOException { + protected void preGetSubLeafCollectors(LeafReaderContext ctx) throws IOException { + super.preGetSubLeafCollectors(ctx); processBufferedDocs(); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 4fcc42b6211b2..2475ade68bc07 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -43,7 +43,6 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; -import 
org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; import org.opensearch.common.util.LongHash; import org.opensearch.common.xcontent.XContentBuilder; @@ -297,7 +296,7 @@ protected void doClose() { static class LowCardinality extends GlobalOrdinalsStringTermsAggregator { private LongUnaryOperator mapping; - private IntArray segmentDocCounts; + private LongArray segmentDocCounts; LowCardinality( String name, @@ -332,7 +331,7 @@ static class LowCardinality extends GlobalOrdinalsStringTermsAggregator { metadata ); assert factories == null || factories.countAggregators() == 0; - this.segmentDocCounts = context.bigArrays().newIntArray(1, true); + this.segmentDocCounts = context.bigArrays().newLongArray(1, true); } @Override @@ -356,7 +355,8 @@ public void collect(int doc, long owningBucketOrd) throws IOException { return; } int ord = singleValues.ordValue(); - segmentDocCounts.increment(ord + 1, 1); + long docCount = docCountProvider.getDocCount(doc); + segmentDocCounts.increment(ord + 1, docCount); } }); } @@ -369,7 +369,8 @@ public void collect(int doc, long owningBucketOrd) throws IOException { return; } for (long segmentOrd = segmentOrds.nextOrd(); segmentOrd != NO_MORE_ORDS; segmentOrd = segmentOrds.nextOrd()) { - segmentDocCounts.increment(segmentOrd + 1, 1); + long docCount = docCountProvider.getDocCount(doc); + segmentDocCounts.increment(segmentOrd + 1, docCount); } } }); @@ -392,7 +393,7 @@ private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IO for (long i = 1; i < segmentDocCounts.size(); i++) { // We use set(...) here, because we need to reset the slot to 0. // segmentDocCounts gets reused over the segments and otherwise counts would be too high. - int inc = segmentDocCounts.set(i, 0); + long inc = segmentDocCounts.set(i, 0); if (inc == 0) { continue; } diff --git a/server/src/test/java/org/opensearch/index/mapper/DocCountFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DocCountFieldMapperTests.java new file mode 100644 index 0000000000000..634276a34b9af --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/DocCountFieldMapperTests.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details.
+ */ +package org.opensearch.index.mapper; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexableField; + +import static org.hamcrest.Matchers.containsString; + +public class DocCountFieldMapperTests extends MapperServiceTestCase { + + private static final String CONTENT_TYPE = DocCountFieldMapper.CONTENT_TYPE; + private static final String DOC_COUNT_FIELD = DocCountFieldMapper.NAME; + + /** + * Test parsing field mapping and adding simple field + */ + public void testParseValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + ParsedDocument doc = mapper.parse(source(b -> b.field("foo", 500).field(CONTENT_TYPE, 100))); + + IndexableField field = doc.rootDoc().getField(DOC_COUNT_FIELD); + assertEquals(100L, field.numericValue()); + assertEquals(DocValuesType.NUMERIC, field.fieldType().docValuesType()); + assertEquals(1, doc.rootDoc().getFields(DOC_COUNT_FIELD).length); + } + + public void testInvalidDocument_NegativeDocCount() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + Exception e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field(CONTENT_TYPE, -100)))); + assertThat(e.getCause().getMessage(), containsString("Field [_doc_count] must be a positive integer")); + } + + public void testInvalidDocument_ZeroDocCount() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + Exception e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field(CONTENT_TYPE, 0)))); + assertThat(e.getCause().getMessage(), containsString("Field [_doc_count] must be a positive integer")); + } + + public void testInvalidDocument_NonNumericDocCount() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + Exception e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field(CONTENT_TYPE, "foo")))); + assertThat( + e.getCause().getMessage(), + containsString("Failed to parse object: expecting token of type [VALUE_NUMBER] but found [VALUE_STRING]") + ); + } + + public void testInvalidDocument_FractionalDocCount() throws Exception { + DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); + Exception e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field(CONTENT_TYPE, 100.23)))); + assertThat(e.getCause().getMessage(), containsString("100.23 cannot be converted to Long without data loss")); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/DocCountFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DocCountFieldTypeTests.java new file mode 100644 index 0000000000000..4961736ea933c --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/DocCountFieldTypeTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ +package org.opensearch.index.mapper; + +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.opensearch.index.query.QueryShardException; + +import java.io.IOException; +import java.util.Arrays; + +public class DocCountFieldTypeTests extends FieldTypeTestCase { + + public void testTermQuery() { + MappedFieldType ft = new DocCountFieldMapper.DocCountFieldType(); + QueryShardException e = expectThrows(QueryShardException.class, () -> ft.termQuery(10L, randomMockShardContext())); + assertEquals("Field [_doc_count] of type [_doc_count] is not searchable", e.getMessage()); + } + + public void testRangeQuery() { + MappedFieldType ft = new DocCountFieldMapper.DocCountFieldType(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null) + ); + assertEquals("Field [_doc_count] of type [_doc_count] does not support range queries", e.getMessage()); + } + + public void testExistsQuery() { + MappedFieldType ft = new DocCountFieldMapper.DocCountFieldType(); + assertTrue(ft.existsQuery(randomMockShardContext()) instanceof DocValuesFieldExistsQuery); + } + + public void testFetchSourceValue() throws IOException { + MappedFieldType fieldType = new DocCountFieldMapper.DocCountFieldType(); + assertEquals(Arrays.asList(14L), fetchSourceValue(fieldType, 14)); + assertEquals(Arrays.asList(14L), fetchSourceValue(fieldType, "14")); + assertEquals(Arrays.asList(1L), fetchSourceValue(fieldType, "")); + assertEquals(Arrays.asList(1L), fetchSourceValue(fieldType, null)); + } +} diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index afcc6aa006500..ec7ab06ac86a6 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -33,6 +33,7 @@ package org.opensearch.indices; import org.opensearch.Version; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; import org.opensearch.index.mapper.IdFieldMapper; @@ -98,6 +99,7 @@ public Map getMetadataMappers() { NestedPathFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, + DocCountFieldMapper.NAME, FieldNamesFieldMapper.NAME }; public void testBuiltinMappers() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/DocCountProviderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/DocCountProviderTests.java new file mode 100644 index 0000000000000..708d683fe2484 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/DocCountProviderTests.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or 
a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.search.aggregations.bucket; + +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.index.mapper.DocCountFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; +import org.opensearch.search.aggregations.bucket.global.InternalGlobal; + +import java.io.IOException; +import java.util.List; +import java.util.function.Consumer; + +import static java.util.Collections.singleton; + +public class DocCountProviderTests extends AggregatorTestCase { + + private static final String DOC_COUNT_FIELD = DocCountFieldMapper.NAME; + private static final String NUMBER_FIELD = "number"; + + public void testDocsWithDocCount() throws IOException { + testAggregation(new MatchAllDocsQuery(), iw -> { + iw.addDocument(List.of(new NumericDocValuesField(DOC_COUNT_FIELD, 4), new SortedNumericDocValuesField(NUMBER_FIELD, 1))); + iw.addDocument(List.of(new NumericDocValuesField(DOC_COUNT_FIELD, 5), new SortedNumericDocValuesField(NUMBER_FIELD, 7))); + iw.addDocument( + List.of( + // Intentionally omit doc_count field + new SortedNumericDocValuesField(NUMBER_FIELD, 1) + ) + ); + }, global -> { assertEquals(10, global.getDocCount()); }); + } + + public void testDocsWithoutDocCount() throws IOException { + testAggregation(new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedNumericDocValuesField(NUMBER_FIELD, 1))); + iw.addDocument(singleton(new SortedNumericDocValuesField(NUMBER_FIELD, 7))); + iw.addDocument(singleton(new SortedNumericDocValuesField(NUMBER_FIELD, 1))); + }, global -> { assertEquals(3, global.getDocCount()); }); + } + + public void testQueryFiltering() throws IOException { + testAggregation(IntPoint.newRangeQuery(NUMBER_FIELD, 4, 5), iw -> { + iw.addDocument(List.of(new NumericDocValuesField(DOC_COUNT_FIELD, 4), new IntPoint(NUMBER_FIELD, 6))); + iw.addDocument(List.of(new NumericDocValuesField(DOC_COUNT_FIELD, 2), new IntPoint(NUMBER_FIELD, 5))); + iw.addDocument( + List.of( + // Intentionally omit doc_count field + new IntPoint(NUMBER_FIELD, 1) + ) + ); + iw.addDocument( + List.of( + // Intentionally 
omit doc_count field + new IntPoint(NUMBER_FIELD, 5) + ) + ); + }, global -> { assertEquals(3, global.getDocCount()); }); + } + + private void testAggregation(Query query, CheckedConsumer indexer, Consumer verify) + throws IOException { + GlobalAggregationBuilder aggregationBuilder = new GlobalAggregationBuilder("_name"); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NUMBER_FIELD, NumberFieldMapper.NumberType.LONG); + MappedFieldType docCountFieldType = new DocCountFieldMapper.DocCountFieldType(); + testCase(aggregationBuilder, query, indexer, verify, fieldType, docCountFieldType); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java index 3dccdf8dab95e..4571ae7496ae1 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java @@ -347,7 +347,7 @@ private void testRandomCase(boolean forceMerge, boolean missingBucket, int index final LeafBucketCollector leafCollector = new LeafBucketCollector() { @Override public void collect(int doc, long bucket) throws IOException { - queue.addIfCompetitive(indexSortSourcePrefix); + queue.addIfCompetitive(indexSortSourcePrefix, 1); } }; final LeafBucketCollector queueCollector = queue.getLeafCollector(leafReaderContext, leafCollector); From e65aacaf29e84f6ae4468499f738bd6b1fb88f09 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 28 Jul 2022 12:36:43 -0700 Subject: [PATCH 042/104] Deprecate public methods and variables that contain 'master' terminology in class 'NoMasterBlockService' and 'MasterService' (#4006) Deprecates public methods and variables that contain 'master' terminology in class NoMasterBlockService and MasterService. Unit tests are also added to ensure consistency with the new naming. 
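The pattern throughout this patch is rename-plus-deprecated-alias: the cluster-manager name becomes the canonical definition and the old master name delegates to it, so existing callers keep compiling but see a deprecation warning. A condensed, hypothetical sketch of the shape (class and member names abbreviated; the real constants and methods live in NoClusterManagerBlockService and MasterService in the diff below):

    public class RenameSketch {
        // New, inclusive name is the canonical definition.
        public static final int NO_CLUSTER_MANAGER_BLOCK_ID = 2;

        /** @deprecated As of 2.2, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_ID} */
        @Deprecated
        public static final int NO_MASTER_BLOCK_ID = NO_CLUSTER_MANAGER_BLOCK_ID;

        public static boolean assertClusterManagerUpdateThread() {
            // The real implementation asserts on the current thread's name;
            // elided here to keep the sketch self-contained.
            return true;
        }

        /** @deprecated As of 2.2, replaced by {@link #assertClusterManagerUpdateThread()} */
        @Deprecated
        public static boolean assertMasterUpdateThread() {
            return assertClusterManagerUpdateThread();
        }
    }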
Signed-off-by: Tianli Feng --- .../index/rankeval/RankEvalResponseTests.java | 2 +- .../cluster/ClusterStateDiffIT.java | 4 +- .../cluster/MinimumClusterManagerNodesIT.java | 24 +++++------ .../cluster/NoClusterManagerNodeIT.java | 6 +-- .../UnsafeBootstrapAndDetachCommandIT.java | 8 ++-- .../discovery/ClusterManagerDisruptionIT.java | 8 +++- .../cluster/coordination/Coordinator.java | 10 ++--- .../coordination/JoinTaskExecutor.java | 4 +- .../NoClusterManagerBlockService.java | 43 +++++++++++++------ .../cluster/service/ClusterService.java | 2 +- .../cluster/service/MasterService.java | 37 +++++++++++----- .../common/util/concurrent/BaseFuture.java | 2 +- .../ExceptionSerializationTests.java | 6 ++- .../opensearch/OpenSearchExceptionTests.java | 7 ++- ...TransportResyncReplicationActionTests.java | 2 +- ...rnalClusterInfoServiceSchedulingTests.java | 4 +- .../coordination/CoordinatorTests.java | 14 +++--- ...anagerBlockServiceRenamedSettingTests.java | 8 ++-- .../NoClusterManagerBlockServiceTests.java | 20 ++++----- .../service/ClusterApplierServiceTests.java | 2 +- .../cluster/service/MasterServiceTests.java | 13 ++++++ ...ClusterStateServiceRandomUpdatesTests.java | 16 ++++--- .../AbstractCoordinatorTestCase.java | 8 ++-- .../org/opensearch/test/RandomObjects.java | 2 +- 24 files changed, 159 insertions(+), 93 deletions(-) diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java index 463df690be763..c8627932f2a9a 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java @@ -76,7 +76,7 @@ public class RankEvalResponseTests extends OpenSearchTestCase { private static final Exception[] RANDOM_EXCEPTIONS = new Exception[] { - new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)), + new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)), new CircuitBreakingException("Data too large", 123, 456, CircuitBreaker.Durability.PERMANENT), new SearchParseException(SHARD_TARGET, "Parse failure", new XContentLocation(12, 98)), new IllegalArgumentException("Closed resource", new RuntimeException("Resource")), diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java index 52c2cf9bfdcd0..7d26a0a31833b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java @@ -396,9 +396,9 @@ private ClusterState.Builder randomBlocks(ClusterState clusterState) { private ClusterBlock randomGlobalBlock() { switch (randomInt(2)) { case 0: - return NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL; + return NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL; case 1: - return NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; + return NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; default: return GatewayService.STATE_NOT_RECOVERED_BLOCK; } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index 0bed559085b27..3e2a1f1452628 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -93,7 +93,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { logger.info("--> should be blocked, no cluster-manager..."); ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(true)); assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state logger.info("--> start second node, cluster should be formed"); @@ -109,9 +109,9 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.nodes().getSize(), equalTo(2)); @@ -161,11 +161,11 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertBusy(() -> { ClusterState clusterState = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); }); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(true)); // verify that both nodes are still in the cluster state but there is no cluster-manager assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.nodes().getClusterManagerNode(), equalTo(null)); @@ -184,9 +184,9 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + 
assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.nodes().getSize(), equalTo(2)); @@ -214,7 +214,7 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertBusy(() -> { ClusterState state1 = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state1.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state1.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(true)); }); logger.info("--> starting the previous cluster-manager node again..."); @@ -232,9 +232,9 @@ public void testTwoNodesNoClusterManagerBlock() throws Exception { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.nodes().getSize(), equalTo(2)); @@ -262,7 +262,7 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { assertBusy(() -> { for (Client client : clients()) { ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state1.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(state1.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(true)); } }); @@ -324,7 +324,7 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { // spin here to wait till the state is set assertBusy(() -> { ClusterState st = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(st.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); + assertThat(st.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(true)); }); logger.info("--> start back the 2 nodes "); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java index 0a4d9f8564b61..313f9e0da17aa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java @@ -117,7 +117,7 @@ public void testNoClusterManagerActions() throws Exception { .execute() .actionGet() .getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); }); 
assertRequestBuilderThrows( @@ -288,7 +288,7 @@ public void testNoClusterManagerActionsWriteClusterManagerBlock() throws Excepti assertBusy(() -> { ClusterState state = clientToClusterManagerlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); }); GetResponse getResponse = clientToClusterManagerlessNode.prepareGet("test1", "1").get(); @@ -387,7 +387,7 @@ public void testNoClusterManagerActionsMetadataWriteClusterManagerBlock() throws assertBusy(() -> { for (String node : nodesWithShards) { ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); } }); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 0bbeeea9e27a2..ebe65ad48f47e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -220,7 +220,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { ); assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); }); Settings dataPathSettings = internalCluster().dataPathSettings(node); @@ -338,7 +338,7 @@ public void test3ClusterManagerNodes2Failed() throws Exception { .execute() .actionGet() .getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); }); logger.info("--> try to unsafely bootstrap 1st cluster-manager-eligible node, while node lock is held"); @@ -394,7 +394,7 @@ public void test3ClusterManagerNodes2Failed() throws Exception { .execute() .actionGet() .getState(); - assertFalse(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertFalse(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); assertTrue( state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapClusterManagerCommand.UNSAFE_BOOTSTRAP.getKey(), false) ); @@ -508,7 +508,7 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { ); ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)); + assertTrue(state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 412b325b14e73..f548cfdb0eda2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -234,7 +234,11 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. However // It may take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoClusterManager(isolatedNode, NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES, TimeValue.timeValueSeconds(30)); + assertNoClusterManager( + isolatedNode, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES, + TimeValue.timeValueSeconds(30) + ); logger.info("wait until elected cluster-manager has been removed and a new 2 node cluster was formed (via [{}])", isolatedNode); ensureStableCluster(2, nonIsolatedNode); @@ -280,7 +284,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. However // It may take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoClusterManager(isolatedNode, NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node // the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 14f9cca3630fe..36dd4aba104b2 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -105,7 +105,7 @@ import java.util.stream.Stream; import java.util.stream.StreamSupport; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ID; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID; import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -839,7 +839,7 @@ protected void doStart() { .blocks( ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) - .addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) + .addGlobalBlock(noClusterManagerBlockService.getNoClusterManagerBlock()) ) .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId())) .build(); @@ -886,7 +886,7 @@ public void invariant() { assert followersChecker.getFastResponseState().term == getCurrentTerm() : followersChecker.getFastResponseState(); assert followersChecker.getFastResponseState().mode == getMode() :
followersChecker.getFastResponseState(); assert (applierState.nodes().getClusterManagerNodeId() == null) == applierState.blocks() - .hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); + .hasGlobalBlockWithId(NO_CLUSTER_MANAGER_BLOCK_ID); assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse(); assert lagDetector.getTrackedNodes().contains(getLocalNode()) == false : lagDetector.getTrackedNodes(); @@ -1221,11 +1221,11 @@ ClusterState getStateForClusterManagerService() { private ClusterState clusterStateWithNoClusterManagerBlock(ClusterState clusterState) { if (clusterState.nodes().getClusterManagerNodeId() != null) { // remove block if it already exists before adding new one - assert clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false + assert clusterState.blocks().hasGlobalBlockWithId(NO_CLUSTER_MANAGER_BLOCK_ID) == false : "NO_MASTER_BLOCK should only be added by Coordinator"; final ClusterBlocks clusterBlocks = ClusterBlocks.builder() .blocks(clusterState.blocks()) - .addGlobalBlock(noClusterManagerBlockService.getNoMasterBlock()) + .addGlobalBlock(noClusterManagerBlockService.getNoClusterManagerBlock()) .build(); final DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder(clusterState.nodes()).clusterManagerNodeId(null).build(); return ClusterState.builder(clusterState).blocks(clusterBlocks).nodes(discoveryNodes).build(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 7582946bb8f3f..5afdb5b12db23 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -285,7 +285,9 @@ protected ClusterState.Builder becomeClusterManagerAndTrimConflictingNodes(Clust ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) .blocks( - ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID) + ClusterBlocks.builder() + .blocks(currentState.blocks()) + .removeGlobalBlock(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID) ) .build(); logger.trace("becomeClusterManagerAndTrimConflictingNodes: {}", tmpState.nodes()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java index 44ced46048736..71e9a87cdffae 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java @@ -47,9 +47,9 @@ * @opensearch.internal */ public class NoClusterManagerBlockService { - public static final int NO_MASTER_BLOCK_ID = 2; - public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock( - NO_MASTER_BLOCK_ID, + public static final int NO_CLUSTER_MANAGER_BLOCK_ID = 2; + public static final ClusterBlock NO_CLUSTER_MANAGER_BLOCK_WRITES = new ClusterBlock( + NO_CLUSTER_MANAGER_BLOCK_ID, "no cluster-manager", true, false, @@ -57,8 +57,8 @@ public class NoClusterManagerBlockService { RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE) ); - public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock( - NO_MASTER_BLOCK_ID, + public static final ClusterBlock 
NO_CLUSTER_MANAGER_BLOCK_ALL = new ClusterBlock( + NO_CLUSTER_MANAGER_BLOCK_ID, "no cluster-manager", true, true, @@ -66,8 +66,8 @@ public class NoClusterManagerBlockService { RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL ); - public static final ClusterBlock NO_MASTER_BLOCK_METADATA_WRITES = new ClusterBlock( - NO_MASTER_BLOCK_ID, + public static final ClusterBlock NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES = new ClusterBlock( + NO_CLUSTER_MANAGER_BLOCK_ID, "no cluster-manager", true, false, @@ -76,6 +76,19 @@ public class NoClusterManagerBlockService { EnumSet.of(ClusterBlockLevel.METADATA_WRITE) ); + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_ID} */ + @Deprecated + public static final int NO_MASTER_BLOCK_ID = NO_CLUSTER_MANAGER_BLOCK_ID; + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_WRITES} */ + @Deprecated + public static final ClusterBlock NO_MASTER_BLOCK_WRITES = NO_CLUSTER_MANAGER_BLOCK_WRITES; + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_ALL} */ + @Deprecated + public static final ClusterBlock NO_MASTER_BLOCK_ALL = NO_CLUSTER_MANAGER_BLOCK_ALL; + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES} */ + @Deprecated + public static final ClusterBlock NO_MASTER_BLOCK_METADATA_WRITES = NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES; + public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>( "cluster.no_master_block", "metadata_write", @@ -98,17 +111,17 @@ public class NoClusterManagerBlockService { public NoClusterManagerBlockService(Settings settings, ClusterSettings clusterSettings) { this.noClusterManagerBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(NO_CLUSTER_MANAGER_BLOCK_SETTING, this::setNoMasterBlock); + clusterSettings.addSettingsUpdateConsumer(NO_CLUSTER_MANAGER_BLOCK_SETTING, this::setNoClusterManagerBlock); } private static ClusterBlock parseNoClusterManagerBlock(String value) { switch (value) { case "all": - return NO_MASTER_BLOCK_ALL; + return NO_CLUSTER_MANAGER_BLOCK_ALL; case "write": - return NO_MASTER_BLOCK_WRITES; + return NO_CLUSTER_MANAGER_BLOCK_WRITES; case "metadata_write": - return NO_MASTER_BLOCK_METADATA_WRITES; + return NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES; default: throw new IllegalArgumentException( "invalid no-cluster-manager block [" + value + "], must be one of [all, write, metadata_write]" @@ -116,11 +129,17 @@ private static ClusterBlock parseNoClusterManagerBlock(String value) { } } + public ClusterBlock getNoClusterManagerBlock() { + return noClusterManagerBlock; + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getNoClusterManagerBlock()} */ + @Deprecated public ClusterBlock getNoMasterBlock() { return noClusterManagerBlock; } - private void setNoMasterBlock(ClusterBlock noClusterManagerBlock) { + private void setNoClusterManagerBlock(ClusterBlock noClusterManagerBlock) { this.noClusterManagerBlock = noClusterManagerBlock; } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index c58bb4d9a947c..17ad8412de525 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ 
b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -252,7 +252,7 @@ public ClusterApplierService getClusterApplierService() { public static boolean assertClusterOrClusterManagerStateThread() { assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) - || Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME) + || Thread.currentThread().getName().contains(MasterService.CLUSTER_MANAGER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread"; return true; } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index cc58c56e00dd5..e20d93e76ef01 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -107,6 +107,10 @@ public class MasterService extends AbstractLifecycleComponent { Setting.Property.NodeScope ); + static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} */ + @Deprecated static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; ClusterStatePublisher clusterStatePublisher; @@ -156,8 +160,8 @@ protected synchronized void doStart() { protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { return OpenSearchExecutors.newSinglePrioritizing( - nodeName + "/" + MASTER_UPDATE_THREAD_NAME, - daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME), + nodeName + "/" + CLUSTER_MANAGER_UPDATE_THREAD_NAME, + daemonThreadFactory(nodeName, CLUSTER_MANAGER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler() ); @@ -228,24 +232,37 @@ ClusterState state() { return clusterStateSupplier.get(); } - private static boolean isMasterUpdateThread() { - return Thread.currentThread().getName().contains(MASTER_UPDATE_THREAD_NAME); + private static boolean isClusterManagerUpdateThread() { + return Thread.currentThread().getName().contains(CLUSTER_MANAGER_UPDATE_THREAD_NAME) + || Thread.currentThread().getName().contains(MASTER_UPDATE_THREAD_NAME); } - public static boolean assertMasterUpdateThread() { - assert isMasterUpdateThread() : "not called from the cluster-manager service thread"; + public static boolean assertClusterManagerUpdateThread() { + assert isClusterManagerUpdateThread() : "not called from the cluster-manager service thread"; return true; } - public static boolean assertNotMasterUpdateThread(String reason) { - assert isMasterUpdateThread() == false : "Expected current thread [" + public static boolean assertNotClusterManagerUpdateThread(String reason) { + assert isClusterManagerUpdateThread() == false : "Expected current thread [" + Thread.currentThread() - + "] to not be the cluster-maanger service thread. Reason: [" + + "] to not be the cluster-manager service thread. 
Reason: [" + reason + "]"; return true; } + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertClusterManagerUpdateThread()} */ + @Deprecated + public static boolean assertMasterUpdateThread() { + return assertClusterManagerUpdateThread(); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertNotClusterManagerUpdateThread(String)} */ + @Deprecated + public static boolean assertNotMasterUpdateThread(String reason) { + return assertNotClusterManagerUpdateThread(reason); + } + private void runTasks(TaskInputs taskInputs) { final String summary = taskInputs.summary; if (!lifecycle.started()) { @@ -314,7 +331,7 @@ protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs task final PlainActionFuture fut = new PlainActionFuture() { @Override protected boolean blockingAllowed() { - return isMasterUpdateThread() || super.blockingAllowed(); + return isClusterManagerUpdateThread() || super.blockingAllowed(); } }; clusterStatePublisher.publish(clusterChangedEvent, fut, taskOutputs.createAckListener(threadPool, clusterChangedEvent.state())); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java index fef37299b349d..f394809aaacf7 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java @@ -109,7 +109,7 @@ protected boolean blockingAllowed() { return Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) && ClusterApplierService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON) - && MasterService.assertNotMasterUpdateThread(BLOCKING_OP_REASON); + && MasterService.assertNotClusterManagerUpdateThread(BLOCKING_OP_REASON); } @Override diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index c0d30a92e2d4d..5a93d7c0bd86e 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -516,9 +516,11 @@ public void testFailedNodeException() throws IOException { } public void testClusterBlockException() throws IOException { - ClusterBlockException ex = serialize(new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES))); + ClusterBlockException ex = serialize( + new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)) + ); assertEquals("blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];", ex.getMessage()); - assertTrue(ex.blocks().contains(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)); + assertTrue(ex.blocks().contains(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)); assertEquals(1, ex.blocks().size()); } diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index ca2940c742ec0..bd2695508dfcb 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -478,7 +478,10 @@ public void testToXContentWithHeadersAndMetadata() throws IOException { "foo", new OpenSearchException( "bar", - new OpenSearchException("baz", new 
ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES))) + new OpenSearchException( + "baz", + new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)) + ) ) ); e.addHeader("foo_0", "0"); @@ -1032,7 +1035,7 @@ public static Tuple randomExceptions() { int type = randomIntBetween(0, 5); switch (type) { case 0: - actual = new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)); + actual = new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)); expected = new OpenSearchException( "OpenSearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];]" diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index 91d780e218e71..2ebca16519258 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -116,7 +116,7 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { ClusterState.builder(clusterService.state()) .blocks( ClusterBlocks.builder() - .addGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL) + .addGlobalBlock(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL) .addIndexBlock(indexName, IndexMetadata.INDEX_WRITE_BLOCK) ) ); diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index a989bacbf47bb..7a073456af1ba 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -210,7 +210,9 @@ protected void requestCount++; // ClusterInfoService handles ClusterBlockExceptions quietly, so we invent such an exception to avoid excess logging listener.onFailure( - new ClusterBlockException(org.opensearch.common.collect.Set.of(NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL)) + new ClusterBlockException( + org.opensearch.common.collect.Set.of(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL) + ) ); } else { fail("unexpected action: " + action.name()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index 9f9de698296b8..d96c972bc6021 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -82,10 +82,10 @@ import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_INTERVAL_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL; +import static 
org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; @@ -1122,19 +1122,19 @@ public void testStayCandidateAfterReceivingFollowerCheckFromKnownClusterManager( } public void testAppliesNoClusterManagerBlockWritesByDefault() { - testAppliesNoClusterManagerBlock(null, NO_MASTER_BLOCK_WRITES); + testAppliesNoClusterManagerBlock(null, NO_CLUSTER_MANAGER_BLOCK_WRITES); } public void testAppliesNoClusterManagerBlockWritesIfConfigured() { - testAppliesNoClusterManagerBlock("write", NO_MASTER_BLOCK_WRITES); + testAppliesNoClusterManagerBlock("write", NO_CLUSTER_MANAGER_BLOCK_WRITES); } public void testAppliesNoClusterManagerBlockAllIfConfigured() { - testAppliesNoClusterManagerBlock("all", NO_MASTER_BLOCK_ALL); + testAppliesNoClusterManagerBlock("all", NO_CLUSTER_MANAGER_BLOCK_ALL); } public void testAppliesNoClusterManagerBlockMetadataWritesIfConfigured() { - testAppliesNoClusterManagerBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES); + testAppliesNoClusterManagerBlock("metadata_write", NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES); } private void testAppliesNoClusterManagerBlock(String noClusterManagerBlockSetting, ClusterBlock expectedBlock) { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java index 42a230d05cbd4..8a85544299618 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceRenamedSettingTests.java @@ -56,7 +56,7 @@ public void testSettingFallback() { public void testSettingGetValue() { Settings settings = Settings.builder().put("cluster.no_cluster_manager_block", "all").build(); assertEquals( - NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL, NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) ); assertEquals( @@ -71,7 +71,7 @@ public void testSettingGetValue() { public void testSettingGetValueWithFallback() { Settings settings = Settings.builder().put("cluster.no_master_block", "metadata_write").build(); assertEquals( - NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES, NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) ); assertSettingDeprecationsAndWarnings(new Setting[] { NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING }); @@ -86,11 +86,11 @@ public void testSettingGetValueWhenBothAreConfigured() { .put("cluster.no_master_block", "metadata_write") .build(); assertEquals( - NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL, 
NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings) ); assertEquals( - NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES, + NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES, NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING.get(settings) ); assertSettingDeprecationsAndWarnings(new Setting[] { NoClusterManagerBlockService.NO_MASTER_BLOCK_SETTING }); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java index b9a72e00aebb0..d9ff21204387d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoClusterManagerBlockServiceTests.java @@ -35,10 +35,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_METADATA_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; import static org.opensearch.common.settings.ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; import static org.hamcrest.Matchers.sameInstance; @@ -61,22 +61,22 @@ private void assertDeprecatedWarningEmitted() { public void testBlocksWritesByDefault() { createService(Settings.EMPTY); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES)); } public void testBlocksWritesIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build()); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_WRITES)); } public void testBlocksAllIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_ALL)); } public void testBlocksMetadataWritesIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES)); } public void testRejectsInvalidSetting() { @@ -88,12 +88,12 @@ public void testRejectsInvalidSetting() { public void 
testSettingCanBeUpdated() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_ALL)); clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build()); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_WRITES)); clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()); - assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoClusterManagerBlock(), sameInstance(NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES)); } } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index 830a7d2a557f2..62dae0622eb85 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -319,7 +319,7 @@ public void offMaster() { nodes = state.nodes(); nodesBuilder = DiscoveryNodes.builder(nodes).clusterManagerNodeId(null); state = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().addGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)) + .blocks(ClusterBlocks.builder().addGlobalBlock(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)) .nodes(nodesBuilder) .build(); setState(timedClusterApplierService, state); diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 8827064974a19..e39520ce72568 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -92,6 +92,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; public class MasterServiceTests extends OpenSearchTestCase { @@ -1098,6 +1099,18 @@ public void onAckTimeout() { } } + public void testDeprecatedMasterServiceUpdateTaskThreadName() { + Thread.currentThread().setName(MasterService.MASTER_UPDATE_THREAD_NAME); + assertThat(MasterService.assertClusterManagerUpdateThread(), is(Boolean.TRUE)); + assertThrows(AssertionError.class, () -> MasterService.assertNotClusterManagerUpdateThread("test")); + Thread.currentThread().setName(MasterService.CLUSTER_MANAGER_UPDATE_THREAD_NAME); + assertThat(MasterService.assertClusterManagerUpdateThread(), is(Boolean.TRUE)); + assertThrows(AssertionError.class, () -> MasterService.assertNotClusterManagerUpdateThread("test")); + Thread.currentThread().setName("test not cluster manager update thread"); + assertThat(MasterService.assertNotClusterManagerUpdateThread("test"), is(Boolean.TRUE)); + assertThrows(AssertionError.class, () -> MasterService.assertClusterManagerUpdateThread()); + } + /** * Returns the cluster state that the cluster-manager service uses (and that is provided 
by the discovery layer) */ diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 939e8b2d4e782..1f2360abde2ad 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -370,22 +370,26 @@ public ClusterState randomlyUpdateClusterState( Supplier indicesServiceSupplier ) { // randomly remove no_cluster_manager blocks - if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)) { + if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)) { state = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)) + .blocks( + ClusterBlocks.builder() + .blocks(state.blocks()) + .removeGlobalBlock(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID) + ) .build(); } // randomly add no_cluster_manager blocks - if (rarely() && state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID) == false) { + if (rarely() && state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID) == false) { ClusterBlock block = randomBoolean() - ? NoClusterManagerBlockService.NO_MASTER_BLOCK_ALL - : NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES; + ? NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ALL + : NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build(); } // if no_cluster_manager block is in place, make no other cluster state changes - if (state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_MASTER_BLOCK_ID)) { + if (state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID)) { return state; } diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 36469b2ee1999..d6ec70238f70d 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -139,7 +139,7 @@ import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_INTERVAL_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; -import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_MASTER_BLOCK_ID; +import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -567,7 +567,7 @@ void stabilise(long stabilisationDurationMillis) { assertTrue(leaderId + " exists in its 
last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId)); assertThat( leaderId + " has no NO_CLUSTER_MANAGER_BLOCK", - leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), + leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false) ); assertThat( @@ -622,7 +622,7 @@ void stabilise(long stabilisationDurationMillis) { ); assertThat( nodeId + " has no NO_CLUSTER_MANAGER_BLOCK", - clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), + clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(false) ); assertThat( @@ -639,7 +639,7 @@ void stabilise(long stabilisationDurationMillis) { ); assertThat( nodeId + " has NO_CLUSTER_MANAGER_BLOCK", - clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), + clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_CLUSTER_MANAGER_BLOCK_ID), equalTo(true) ); assertFalse( diff --git a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java index a0227c51c8085..1f3500dccce8f 100644 --- a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java @@ -331,7 +331,7 @@ private static Tuple randomShardInfoFailure(Random random) { int type = randomIntBetween(random, 0, 3); switch (type) { case 0: - actualException = new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_MASTER_BLOCK_WRITES)); + actualException = new ClusterBlockException(singleton(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES)); expectedException = new OpenSearchException( "OpenSearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no cluster-manager];]" From 82ff463fd1bc07c241c8674b72182f2875a60425 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Fri, 29 Jul 2022 15:21:56 +0530 Subject: [PATCH 043/104] Making shard copy count a multiple of attribute count (#3462) * Making all copies a multiple of attribute count Signed-off-by: Gaurav Bafna --- .../admin/indices/rollover/RolloverIT.java | 43 +++++++ .../allocation/AwarenessAllocationIT.java | 8 ++ .../settings/UpdateNumberOfReplicasIT.java | 46 ++++++- .../template/SimpleIndexTemplateIT.java | 47 ++++++-- .../snapshots/RestoreSnapshotIT.java | 66 ++++++++-- .../metadata/MetadataCreateIndexService.java | 31 ++++- .../MetadataIndexTemplateService.java | 6 +- .../MetadataUpdateSettingsService.java | 19 ++- .../allocation/AwarenessReplicaBalance.java | 114 ++++++++++++++++++ .../common/settings/ClusterSettings.java | 2 + .../main/java/org/opensearch/node/Node.java | 9 +- .../MetadataRolloverServiceTests.java | 10 +- .../MetadataCreateIndexServiceTests.java | 65 +++++++++- .../MetadataIndexTemplateServiceTests.java | 18 ++- .../allocation/AwarenessAllocationTests.java | 1 + .../AwarenessReplicaBalanceTests.java | 66 ++++++++++ .../indices/cluster/ClusterStateChanges.java | 11 +- .../snapshots/SnapshotResiliencyTests.java | 4 +- .../test/OpenSearchIntegTestCase.java | 23 ++++ 19 files changed, 551 insertions(+), 38 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java create mode 100644 
server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java index 178dc2c6ffa87..d0ff3ef19a028 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java @@ -61,6 +61,7 @@ import java.util.List; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.opensearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -172,6 +173,7 @@ public void testRolloverWithNoWriteIndex() { } public void testRolloverWithIndexSettings() throws Exception { + Alias testAlias = new Alias("test_alias"); boolean explicitWriteIndex = randomBoolean(); if (explicitWriteIndex) { @@ -210,6 +212,47 @@ public void testRolloverWithIndexSettings() throws Exception { } } + public void testRolloverWithIndexSettingsBalancedReplica() throws Exception { + Alias testAlias = new Alias("test_alias"); + boolean explicitWriteIndex = randomBoolean(); + if (explicitWriteIndex) { + testAlias.writeIndex(true); + } + assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get()); + manageReplicaBalanceSetting(true); + index("test_index-2", "type1", "1", "field", "value"); + flush("test_index-2"); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + final IllegalArgumentException restoreError = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareRolloverIndex("test_alias").settings(settings).alias(new Alias("extra_alias")).get() + ); + + assertThat( + restoreError.getMessage(), + containsString("expected total copies needs to be a multiple of total awareness attributes [2]") + ); + + final Settings balancedReplicaSettings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + client().admin() + .indices() + .prepareRolloverIndex("test_alias") + .settings(balancedReplicaSettings) + .alias(new Alias("extra_alias")) + .waitForActiveShards(0) + .get(); + + manageReplicaBalanceSetting(false); + } + public void testRolloverWithIndexSettingsWithoutPrefix() throws Exception { Alias testAlias = new Alias("test_alias"); boolean explicitWriteIndex = randomBoolean(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java index 87d15bf9d3750..a8d4d016ce288 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java @@ -36,12 +36,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.cluster.ClusterState; import 
org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexMetadata.State; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; @@ -77,6 +79,12 @@ public void testSimpleAwareness() throws Exception { logger.info("--> starting 2 nodes on the same rack"); internalCluster().startNodes(2, Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_1").build()); + Settings settings = Settings.builder() + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), false) + .build(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(settings); + createIndex("test1"); createIndex("test2"); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java index f78ecd82834c2..98001b447e8b2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -45,11 +45,11 @@ import java.io.IOException; import java.util.EnumSet; +import static org.hamcrest.Matchers.equalTo; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) public class UpdateNumberOfReplicasIT extends OpenSearchIntegTestCase { @@ -606,4 +606,48 @@ public void testUpdateNumberOfReplicasAllowNoIndices() { assertThat(numberOfReplicas, equalTo(0)); } + public void testAwarenessReplicaBalance() { + createIndex("aware-replica", Settings.builder().put("index.number_of_replicas", 0).build()); + createIndex(".system-index", Settings.builder().put("index.number_of_replicas", 0).build()); + manageReplicaBalanceSetting(true); + int updated = 0; + + try { + // replica count of 1 is ideal + client().admin() + .indices() + .prepareUpdateSettings("aware-replica") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + .execute() + .actionGet(); + updated++; + + // system index - should be able to update + client().admin() + .indices() + .prepareUpdateSettings(".system-index") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + .execute() + .actionGet(); + updated++; + + client().admin() + .indices() + .prepareUpdateSettings("aware-replica") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + .execute() + .actionGet(); + fail("should have thrown an exception about the replica count"); + + } catch (IllegalArgumentException e) { + assertEquals( + "Validation Failed: 1: expected total copies needs to be a multiple of total awareness attributes [2];", + e.getMessage() + ); + assertEquals(2, updated); + } 
finally { + manageReplicaBalanceSetting(false); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java index 0e15a0c895432..42c0145676f2d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java @@ -32,18 +32,19 @@ package org.opensearch.indices.template; +import org.junit.After; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; - import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.ParsingException; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; @@ -52,12 +53,11 @@ import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.InvalidAliasNameException; +import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.plugins.Plugin; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; - -import org.junit.After; +import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -68,11 +68,6 @@ import java.util.List; import java.util.Set; -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.opensearch.index.query.QueryBuilders.termQuery; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -83,6 +78,11 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; public class SimpleIndexTemplateIT extends OpenSearchIntegTestCase { @@ -1029,4 +1029,33 @@ public void testPartitionedTemplate() throws Exception { GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test_good").get(); assertEquals("6", 
getSettingsResponse.getIndexToSettings().get("test_good").get("index.routing_partition_size")); } + + public void testAwarenessReplicaBalance() throws IOException { + manageReplicaBalanceSetting(true); + try { + client().admin() + .indices() + .preparePutTemplate("template_1") + .setPatterns(Arrays.asList("a*", "b*")) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + .get(); + + client().admin() + .indices() + .preparePutTemplate("template_1") + .setPatterns(Arrays.asList("a*", "b*")) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .get(); + + fail("should have thrown an exception about the replica count"); + } catch (InvalidIndexTemplateException e) { + assertEquals( + "index_template [template_1] invalid, cause [Validation Failed: 1: expected total copies needs to be a multiple of total awareness attributes [2];]", + e.getMessage() + ); + } finally { + manageReplicaBalanceSetting(false); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 3a7fdd4093657..a40378b9c2dfa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -59,6 +59,14 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; @@ -70,14 +78,6 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateMissing; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { @@ -973,4 +973,54 @@ public void testForbidDisableSoftDeletesDuringRestore() throws Exception { ); assertThat(restoreError.getMessage(), containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); } + + public void testRestoreBalancedReplica() { + try { + createRepository("test-repo", "fs"); + createIndex("test-index", Settings.builder().put("index.number_of_replicas", 0).build()); + createIndex(".system-index", Settings.builder().put("index.number_of_replicas", 0).build()); + ensureGreen(); + clusterAdmin().prepareCreateSnapshot("test-repo", "snapshot-0") + .setIndices("test-index", ".system-index") + .setWaitForCompletion(true) + .get(); + 
manageReplicaBalanceSetting(true); + + final IllegalArgumentException restoreError = expectThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + .setRenamePattern("test-index") + .setRenameReplacement("new-index") + .setIndices("test-index") + .get() + ); + assertThat( + restoreError.getMessage(), + containsString("expected total copies needs to be a multiple of total awareness attributes [2]") + ); + + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + .setRenamePattern(".system-index") + .setRenameReplacement(".system-index-restore-1") + .setWaitForCompletion(true) + .setIndices(".system-index") + .execute() + .actionGet(); + + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + .setRenamePattern("test-index") + .setRenameReplacement("new-index") + .setIndexSettings(Settings.builder().put("index.number_of_replicas", 1).build()) + .setWaitForCompletion(true) + .setIndices("test-index") + .execute() + .actionGet(); + + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + } finally { + manageReplicaBalanceSetting(false); + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 642b0f7b8d36f..d78e5e872fd2b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -59,6 +59,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -102,6 +103,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -145,6 +147,7 @@ public class MetadataCreateIndexService { private final ShardLimitValidator shardLimitValidator; private final boolean forbidPrivateIndexSettings; private final Set indexSettingProviders = new HashSet<>(); + private AwarenessReplicaBalance awarenessReplicaBalance; public MetadataCreateIndexService( final Settings settings, @@ -158,7 +161,8 @@ public MetadataCreateIndexService( final ThreadPool threadPool, final NamedXContentRegistry xContentRegistry, final SystemIndices systemIndices, - final boolean forbidPrivateIndexSettings + final boolean forbidPrivateIndexSettings, + final AwarenessReplicaBalance awarenessReplicaBalance ) { this.settings = settings; this.clusterService = clusterService; @@ -172,6 +176,7 @@ public MetadataCreateIndexService( this.systemIndices = systemIndices; this.forbidPrivateIndexSettings = forbidPrivateIndexSettings; this.shardLimitValidator = shardLimitValidator; + this.awarenessReplicaBalance = awarenessReplicaBalance; } /** @@ -1150,7 +1155,7 @@ private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState public void validateIndexSettings(String indexName, final Settings settings, final 
boolean forbidPrivateIndexSettings) throws IndexCreationException { - List validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings); + List validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings, indexName); if (validationErrors.isEmpty() == false) { ValidationException validationException = new ValidationException(); @@ -1159,11 +1164,31 @@ public void validateIndexSettings(String indexName, final Settings settings, fin } } - List getIndexSettingsValidationErrors(final Settings settings, final boolean forbidPrivateIndexSettings) { + List getIndexSettingsValidationErrors(final Settings settings, final boolean forbidPrivateIndexSettings, String indexName) { + List validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings, Optional.of(indexName)); + return validationErrors; + } + + List getIndexSettingsValidationErrors( + final Settings settings, + final boolean forbidPrivateIndexSettings, + Optional indexName + ) { List validationErrors = validateIndexCustomPath(settings, env.sharedDataDir()); if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } + if (indexName.isEmpty() || indexName.get().charAt(0) != '.') { + // Apply aware replica balance only to non system indices + int replicaCount = settings.getAsInt( + IndexMetadata.SETTING_NUMBER_OF_REPLICAS, + INDEX_NUMBER_OF_REPLICAS_SETTING.getDefault(Settings.EMPTY) + ); + Optional error = awarenessReplicaBalance.validate(replicaCount); + if (error.isPresent()) { + validationErrors.add(error.get()); + } + } return validationErrors; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 1a0f4f0f83e00..7e91b491a234c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1471,7 +1471,11 @@ private void validate(String name, @Nullable Settings settings, List ind validationErrors.add(t.getMessage()); } } - List indexSettingsValidation = metadataCreateIndexService.getIndexSettingsValidationErrors(settings, true); + List indexSettingsValidation = metadataCreateIndexService.getIndexSettingsValidationErrors( + settings, + true, + Optional.empty() + ); validationErrors.addAll(indexSettingsValidation); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 1390860271577..eb142be815d27 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.ValidationException; @@ -89,6 +90,8 @@ public class MetadataUpdateSettingsService { private final ShardLimitValidator shardLimitValidator; private final ThreadPool threadPool; + private AwarenessReplicaBalance 
awarenessReplicaBalance;
+
     @Inject
     public MetadataUpdateSettingsService(
         ClusterService clusterService,
@@ -96,7 +99,8 @@ public MetadataUpdateSettingsService(
         IndexScopedSettings indexScopedSettings,
         IndicesService indicesService,
         ShardLimitValidator shardLimitValidator,
-        ThreadPool threadPool
+        ThreadPool threadPool,
+        AwarenessReplicaBalance awarenessReplicaBalance
     ) {
         this.clusterService = clusterService;
         this.threadPool = threadPool;
@@ -104,6 +108,7 @@ public MetadataUpdateSettingsService(
         this.indexScopedSettings = indexScopedSettings;
         this.indicesService = indicesService;
         this.shardLimitValidator = shardLimitValidator;
+        this.awarenessReplicaBalance = awarenessReplicaBalance;
     }

     public void updateSettings(
@@ -193,6 +198,18 @@ public ClusterState execute(ClusterState currentState) {
                 if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) {
                     final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings);
                     if (preserveExisting == false) {
+                        for (Index index : request.indices()) {
+                            if (index.getName().charAt(0) != '.') {
+                                // No replica count validation for system indices
+                                Optional<String> error = awarenessReplicaBalance.validate(updatedNumberOfReplicas);
+                                if (error.isPresent()) {
+                                    ValidationException ex = new ValidationException();
+                                    ex.addValidationError(error.get());
+                                    throw ex;
+                                }
+                            }
+                        }
+
                         // Verify that this won't take us over the cluster shard limit.
                         int totalNewShards = Arrays.stream(request.indices())
                             .mapToInt(i -> getTotalNewShards(i, currentState, updatedNumberOfReplicas))
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
new file mode 100644
index 0000000000000..accf0b69a4f0e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
@@ -0,0 +1,114 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing.allocation;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import static java.lang.Math.max;
+import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING;
+import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING;
+
+/**
+ * This {@link AwarenessReplicaBalance} gives the total number of unique values of the awareness attributes.
+ * It takes effect only if cluster.routing.allocation.awareness.attributes and
+ * cluster.routing.allocation.awareness.force.zone.values are both specified.
+ *
+ * This is used to enforce that the total number of shard copies is a multiple of the number of unique awareness attribute values.
+ * It helps in balancing shards across all awareness attributes and ensures high availability of data.
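+ *
+ * As an illustrative example (these setting values are hypothetical, not defaults): with
+ *   cluster.routing.allocation.awareness.attributes: zone
+ *   cluster.routing.allocation.awareness.force.zone.values: zone1, zone2
+ *   cluster.routing.allocation.awareness.balance: true
+ * there are two awareness attribute values, so an index with one replica (two total copies) is valid,
+ * while zero or two replicas would be rejected, since total copies must be a multiple of 2.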
+ */
+public class AwarenessReplicaBalance {
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING = Setting.boolSetting(
+        "cluster.routing.allocation.awareness.balance",
+        false,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+
+    private volatile List<String> awarenessAttributes;
+
+    private volatile Map<String, List<String>> forcedAwarenessAttributes;
+
+    private volatile Boolean awarenessBalance;
+
+    public AwarenessReplicaBalance(Settings settings, ClusterSettings clusterSettings) {
+        this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes);
+        setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(
+            CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
+            this::setForcedAwarenessAttributes
+        );
+        setAwarenessBalance(CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING, this::setAwarenessBalance);
+
+    }
+
+    private void setAwarenessBalance(Boolean awarenessBalance) {
+        this.awarenessBalance = awarenessBalance;
+    }
+
+    private void setForcedAwarenessAttributes(Settings forceSettings) {
+        Map<String, List<String>> forcedAwarenessAttributes = new HashMap<>();
+        Map<String, Settings> forceGroups = forceSettings.getAsGroups();
+        for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
+            List<String> aValues = entry.getValue().getAsList("values");
+            if (aValues.size() > 0) {
+                forcedAwarenessAttributes.put(entry.getKey(), aValues);
+            }
+        }
+        this.forcedAwarenessAttributes = forcedAwarenessAttributes;
+    }
+
+    private void setAwarenessAttributes(List<String> awarenessAttributes) {
+        this.awarenessAttributes = awarenessAttributes;
+    }
+
+    /*
+    For a cluster having zone as its awareness attribute, this will return the number of zones if they are set
+    in the forced awareness attributes.
+
+    If there are multiple forced awareness attributes, it will return the size of the largest list, as all copies
+    of the data are supposed to be distributed amongst those.
+
+    cluster.routing.allocation.awareness.attributes: rack_id, zone
+    cluster.routing.allocation.awareness.force.zone.values: zone1, zone2
+    cluster.routing.allocation.awareness.force.rack_id.values: rack_id1, rack_id2, rack_id3
+
+    In this case, the number of awareness attributes would be 3.
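+
+    Continuing the example above (an illustration, assuming the balance setting is enabled):
+    maxAwarenessAttributes() returns 3, so validate(2) passes, because 3 total copies is a multiple of 3,
+    while validate(1) returns the error "expected total copies needs to be a multiple of total awareness
+    attributes [3]", because 2 total copies is not a multiple of 3.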
+ */ + public int maxAwarenessAttributes() { + int awarenessAttributes = 1; + if (this.awarenessBalance == false) { + return awarenessAttributes; + } + for (String awarenessAttribute : this.awarenessAttributes) { + if (forcedAwarenessAttributes.containsKey(awarenessAttribute)) { + awarenessAttributes = max(awarenessAttributes, forcedAwarenessAttributes.get(awarenessAttribute).size()); + } + } + return awarenessAttributes; + } + + public Optional validate(int replicaCount) { + if ((replicaCount + 1) % maxAwarenessAttributes() != 0) { + String errorMessage = "expected total copies needs to be a multiple of total awareness attributes [" + + maxAwarenessAttributes() + + "]"; + return Optional.of(errorMessage); + } + return Optional.empty(); + } + +} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 11cb2ca316235..600c42d036fa1 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -32,6 +32,7 @@ package org.opensearch.common.settings; import org.apache.logging.log4j.LogManager; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.action.search.CreatePitController; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.index.IndexModule; @@ -219,6 +220,7 @@ public void apply(Settings value, Settings current, Settings previous) { Arrays.asList( AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 6cc8128cffd52..7e90d82c4b663 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -37,6 +37,7 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.index.IndexingPressureService; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; @@ -652,6 +653,10 @@ protected Node( final AliasValidator aliasValidator = new AliasValidator(); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); + final AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance( + settings, + clusterService.getClusterSettings() + ); final MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( settings, clusterService, @@ -664,7 +669,8 @@ protected Node( threadPool, xContentRegistry, systemIndices, - forbidPrivateIndexSettings + forbidPrivateIndexSettings, + awarenessReplicaBalance ); pluginsService.filterPlugins(Plugin.class) .forEach( @@ -921,6 +927,7 @@ protected Node( b.bind(IndicesService.class).toInstance(indicesService); b.bind(AliasValidator.class).toInstance(aliasValidator); 
b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); + b.bind(AwarenessReplicaBalance.class).toInstance(awarenessReplicaBalance); b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index afe35538adaf5..d54c17041e96f 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -57,6 +57,7 @@ import org.opensearch.cluster.metadata.MetadataIndexAliasesService; import org.opensearch.cluster.metadata.Template; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Strings; @@ -617,7 +618,8 @@ public void testRolloverClusterState() throws Exception { testThreadPool, null, systemIndices, - false + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -754,7 +756,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { testThreadPool, null, systemIndices, - false + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -931,7 +934,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { testThreadPool, null, new SystemIndices(emptyMap()), - false + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 89550b491500d..ac03ab49bbbbd 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -51,13 +51,16 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.settings.ClusterSettings; 
import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -95,6 +98,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; @@ -108,7 +112,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; -import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; @@ -117,6 +120,8 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_BLOCK; @@ -130,6 +135,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { @@ -592,7 +598,8 @@ public void testValidateIndexName() throws Exception { threadPool, null, new SystemIndices(Collections.emptyMap()), - false + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); validateIndexName(checkerService, "index?name", "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); @@ -673,7 +680,8 @@ public void testValidateDotIndex() { threadPool, null, new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)), - false + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); // Check deprecations assertFalse(checkerService.validateDotIndex(".test2", false)); @@ -1064,6 +1072,52 @@ public void testParseMappingsWithTypelessTemplate() throws Exception { assertThat(mappings, Matchers.hasKey(MapperService.SINGLE_MAPPING_NAME)); } + public void testValidateIndexSettings() { + ClusterService clusterService = mock(ClusterService.class); + ThreadPool threadPool = new TestThreadPool(getTestName()); + Settings settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getSettings()).thenReturn(settings); +
when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + settings, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()) + ); + + List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); + assertThat(validationErrors.size(), is(1)); + assertThat(validationErrors.get(0), is("expected total copies needs to be a multiple of total awareness attributes [3]")); + + settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .put(SETTING_NUMBER_OF_REPLICAS, 2) + .build(); + + validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); + assertThat(validationErrors.size(), is(0)); + + threadPool.shutdown(); + } + public void testBuildIndexMetadata() { IndexMetadata sourceIndexMetadata = IndexMetadata.builder("parent") .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) @@ -1186,10 +1240,11 @@ public void testIndexLifecycleNameSetting() { threadPool, null, new SystemIndices(Collections.emptyMap()), - true + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); - final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true); + final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); assertThat(validationErrors.size(), is(1)); assertThat(validationErrors.get(0), is("expected [index.lifecycle.name] to be private but it was not")); })); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index c3a16a1e25bc8..887d8469bd01c 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -38,9 +38,11 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -85,7 +87,10 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; 
import static org.hamcrest.Matchers.matchesRegex; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.opensearch.common.settings.Settings.builder; +import static org.opensearch.env.Environment.PATH_HOME_SETTING; import static org.opensearch.index.mapper.DataStreamFieldMapper.Defaults.TIMESTAMP_FIELD; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; @@ -2084,9 +2089,14 @@ public void testLegacyNoopUpdate() { } private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { + ClusterService clusterService = mock(ClusterService.class); + Settings settings = Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getSettings()).thenReturn(settings); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, - null, + clusterService, null, null, null, @@ -2096,7 +2106,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr null, xContentRegistry, new SystemIndices(Collections.emptyMap()), - true + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( null, @@ -2158,7 +2169,8 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { null, xContentRegistry(), new SystemIndices(Collections.emptyMap()), - true + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); return new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java index b2adcd21cd8c9..c0cec7e3201bb 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -64,6 +64,7 @@ import java.util.Map; import static java.util.Collections.singletonMap; +import static org.hamcrest.MatcherAssert.assertThat; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java new file mode 100644 index 0000000000000..e2431765709e6 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.allocation; + +import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +import java.util.Optional; + +import static org.hamcrest.Matchers.equalTo; + +public class AwarenessReplicaBalanceTests extends OpenSearchAllocationTestCase { + + private static final ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings( + Settings.EMPTY, + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS + ); + + public void testNoForcedAwarenessAttribute() { + Settings settings = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "rack_id").build(); + + AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); + assertThat(awarenessReplicaBalance.maxAwarenessAttributes(), equalTo(1)); + + assertEquals(awarenessReplicaBalance.validate(0), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(1), Optional.empty()); + } + + public void testForcedAwarenessAttribute() { + Settings settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .build(); + + AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); + assertThat(awarenessReplicaBalance.maxAwarenessAttributes(), equalTo(3)); + assertEquals(awarenessReplicaBalance.validate(2), Optional.empty()); + assertEquals( + awarenessReplicaBalance.validate(1), + Optional.of("expected total copies needs to be a multiple of total awareness attributes [3]") + ); + } + + public void testForcedAwarenessAttributeDisabled() { + Settings settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .build(); + + AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(settings, EMPTY_CLUSTER_SETTINGS); + assertThat(awarenessReplicaBalance.maxAwarenessAttributes(), equalTo(1)); + assertEquals(awarenessReplicaBalance.validate(0), Optional.empty()); + assertEquals(awarenessReplicaBalance.validate(1), Optional.empty()); + } + +} diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 769fdc220bec4..7fe17e570d157 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -81,6 +81,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.FailedShard; import 
org.opensearch.cluster.routing.allocation.RandomAllocationDeciderTests; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -190,6 +191,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th // mocks clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + when(clusterService.getSettings()).thenReturn(SETTINGS); IndicesService indicesService = mock(IndicesService.class); // MetadataCreateIndexService uses withTempIndexService to check mappings -> fake it here try { @@ -272,13 +274,17 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m transportVerifyShardIndexBlockAction ); MetadataDeleteIndexService deleteIndexService = new MetadataDeleteIndexService(SETTINGS, clusterService, allocationService); + + final AwarenessReplicaBalance awarenessReplicaBalance = new AwarenessReplicaBalance(SETTINGS, clusterService.getClusterSettings()); + MetadataUpdateSettingsService metadataUpdateSettingsService = new MetadataUpdateSettingsService( clusterService, allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, shardLimitValidator, - threadPool + threadPool, + awarenessReplicaBalance ); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( SETTINGS, @@ -292,7 +298,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m threadPool, xContentRegistry, systemIndices, - true + true, + awarenessReplicaBalance ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index cca27366f7df0..9a7f0e2005555 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -141,6 +141,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; @@ -1895,7 +1896,8 @@ public void onFailure(final Exception e) { threadPool, namedXContentRegistry, systemIndices, - false + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); actions.put( CreateIndexAction.INSTANCE, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 0f868efdd5c45..8d8df2fec39f9 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -49,6 +49,7 @@ import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import 
org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -91,7 +92,9 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; +import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; @@ -2408,4 +2411,24 @@ protected boolean willSufferDebian8MemoryProblem() { boolean java15Plus = Runtime.version().compareTo(Version.parse("15")) >= 0; return anyDebian8Nodes && java15Plus == false; } + + public void manageReplicaBalanceSetting(boolean apply) { + Settings settings; + if (apply) { + settings = Settings.builder() + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") + .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .build(); + } else { + settings = Settings.builder() + .putNull(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey()) + .putNull(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values") + .putNull(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey()) + .build(); + } + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(settings); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } } From 2a1b239c2af8d7f333a404c66467a50dc0a52c80 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Sat, 30 Jul 2022 00:23:11 +0000 Subject: [PATCH 044/104] Deprecate and Rename abstract methods from 'Master' terminology to 'ClusterManager'. 
(#4032) This commit deprecates and renames the following abstract methods: ``` In class TransportMasterNodeAction - abstract void masterOperation(Request request, ClusterState state, ActionListener listener) In class TransportClusterInfoAction - abstract void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, ActionListener listener) In class TestCluster: public abstract int numDataAndMasterNodes(); ``` and the following concrete methods: ``` In class TransportMasterNodeAction - void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) In class TransportClusterInfoAction - void masterOperation(final Request request, final ClusterState state, final ActionListener listener) ``` Signed-off-by: Rishikesh1159 --- .../repositories/RepositoryBlocksIT.java | 2 +- .../cluster/snapshots/SnapshotBlocksIT.java | 2 +- .../BlobStoreRepositoryCleanupIT.java | 2 +- .../opensearch/snapshots/RepositoriesIT.java | 2 +- ...ansportClusterAllocationExplainAction.java | 2 +- ...nsportAddVotingConfigExclusionsAction.java | 2 +- ...portClearVotingConfigExclusionsAction.java | 2 +- .../health/TransportClusterHealthAction.java | 9 ++++--- .../TransportCleanupRepositoryAction.java | 2 +- .../TransportDeleteRepositoryAction.java | 2 +- .../get/TransportGetRepositoriesAction.java | 2 +- .../put/TransportPutRepositoryAction.java | 2 +- .../TransportVerifyRepositoryAction.java | 2 +- .../TransportClusterRerouteAction.java | 2 +- .../TransportClusterUpdateSettingsAction.java | 2 +- .../TransportClusterSearchShardsAction.java | 2 +- .../clone/TransportCloneSnapshotAction.java | 6 ++++- .../create/TransportCreateSnapshotAction.java | 2 +- .../delete/TransportDeleteSnapshotAction.java | 2 +- .../get/TransportGetSnapshotsAction.java | 2 +- .../TransportRestoreSnapshotAction.java | 2 +- .../TransportSnapshotsStatusAction.java | 2 +- .../state/TransportClusterStateAction.java | 2 +- .../TransportDeleteStoredScriptAction.java | 7 +++-- .../TransportGetStoredScriptAction.java | 7 +++-- .../TransportPutStoredScriptAction.java | 7 +++-- .../TransportPendingClusterTasksAction.java | 2 +- .../alias/TransportIndicesAliasesAction.java | 2 +- .../alias/get/TransportGetAliasesAction.java | 2 +- .../close/TransportCloseIndexAction.java | 4 +-- .../indices/create/AutoCreateAction.java | 6 ++++- .../create/TransportCreateIndexAction.java | 2 +- .../TransportDeleteDanglingIndexAction.java | 2 +- .../datastream/CreateDataStreamAction.java | 2 +- .../datastream/DeleteDataStreamAction.java | 2 +- .../datastream/GetDataStreamAction.java | 2 +- .../delete/TransportDeleteIndexAction.java | 2 +- .../indices/TransportIndicesExistsAction.java | 2 +- .../indices/get/TransportGetIndexAction.java | 2 +- .../get/TransportGetMappingsAction.java | 2 +- .../put/TransportAutoPutMappingAction.java | 2 +- .../put/TransportPutMappingAction.java | 2 +- .../open/TransportOpenIndexAction.java | 2 +- .../TransportAddIndexBlockAction.java | 4 +-- .../rollover/TransportRolloverAction.java | 4 +-- .../get/TransportGetSettingsAction.java | 2 +- .../put/TransportUpdateSettingsAction.java | 2 +- .../TransportIndicesShardStoresAction.java | 2 +- .../indices/shrink/TransportResizeAction.java | 2 +- ...ransportDeleteComponentTemplateAction.java | 2 +- ...rtDeleteComposableIndexTemplateAction.java | 2 +- .../TransportDeleteIndexTemplateAction.java | 2 +- .../TransportGetComponentTemplateAction.java | 2 +- ...sportGetComposableIndexTemplateAction.java | 2 +- .../get/TransportGetIndexTemplatesAction.java | 2 +- 
.../TransportSimulateIndexTemplateAction.java | 2 +- .../post/TransportSimulateTemplateAction.java | 2 +- .../TransportPutComponentTemplateAction.java | 2 +- ...sportPutComposableIndexTemplateAction.java | 2 +- .../put/TransportPutIndexTemplateAction.java | 2 +- .../post/TransportUpgradeSettingsAction.java | 2 +- .../ingest/DeletePipelineTransportAction.java | 2 +- .../ingest/GetPipelineTransportAction.java | 2 +- .../ingest/PutPipelineTransportAction.java | 2 +- .../TransportClusterManagerNodeAction.java | 26 ++++++++++++++++--- .../info/TransportClusterInfoAction.java | 25 +++++++++++++++--- .../master/TransportMasterNodeAction.java | 5 ---- .../info/TransportClusterInfoAction.java | 9 ------- .../CompletionPersistentTaskAction.java | 2 +- .../RemovePersistentTaskAction.java | 2 +- .../persistent/StartPersistentTaskAction.java | 2 +- .../UpdatePersistentTaskStatusAction.java | 2 +- .../snapshots/SnapshotsService.java | 2 +- .../indices/get/GetIndexActionTests.java | 4 +-- .../TransportRolloverActionTests.java | 4 +-- .../settings/get/GetSettingsActionTests.java | 8 ++++-- ...ransportClusterManagerNodeActionTests.java | 7 ++--- .../TransportMasterNodeActionUtils.java | 4 +-- .../InternalOrPrivateSettingsPlugin.java | 2 +- .../opensearch/test/ExternalTestCluster.java | 2 +- .../opensearch/test/InternalTestCluster.java | 2 +- .../java/org/opensearch/test/TestCluster.java | 13 +++++++++- 82 files changed, 171 insertions(+), 114 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 4d3b60da0987f..aff7c5d9876ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -97,7 +97,7 @@ public void testVerifyRepositoryWithBlocks() { .prepareVerifyRepository("test-repo-blocks") .execute() .actionGet(); - assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); + assertThat(response.getNodes().size(), equalTo(cluster().numDataAndClusterManagerNodes())); } finally { setClusterReadOnly(false); } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 1731c607a066d..474a5069ad091 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -92,7 +92,7 @@ protected void setUpRepository() throws Exception { logger.info("--> verify the repository"); VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); - assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndClusterManagerNodes())); logger.info("--> create a snapshot"); CreateSnapshotResponse snapshotResponse = client().admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java 
b/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index f3954578bffc8..29aab140231c7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -54,7 +54,7 @@ public class BlobStoreRepositoryCleanupIT extends AbstractSnapshotIntegTestCase public void testClusterManagerFailoverDuringCleanup() throws Exception { startBlockedCleanup("test-repo"); - final int nodeCount = internalCluster().numDataAndMasterNodes(); + final int nodeCount = internalCluster().numDataAndClusterManagerNodes(); logger.info("--> stopping cluster-manager node"); internalCluster().stopCurrentClusterManagerNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index 9aaafa64ce60c..47fd106ff5a8f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -74,7 +74,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> verify the repository"); int numberOfFiles = FileSystemUtils.files(location).length; VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get(); - assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndClusterManagerNodes())); logger.info("--> verify that we didn't leave any files as a result of verification"); assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles)); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 16721d5a9ec07..7060ac43af7f9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -125,7 +125,7 @@ protected ClusterBlockException checkBlock(ClusterAllocationExplainRequest reque } @Override - protected void masterOperation( + protected void clusterManagerOperation( final ClusterAllocationExplainRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index ab72ce964668f..d0f5e8f198809 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -126,7 +126,7 @@ protected AddVotingConfigExclusionsResponse read(StreamInput in) throws IOExcept } @Override - protected void masterOperation( + protected void clusterManagerOperation( AddVotingConfigExclusionsRequest request, ClusterState state, ActionListener listener diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 3a9da6cebef53..1fc02db4309b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -101,7 +101,7 @@ protected ClearVotingConfigExclusionsResponse read(StreamInput in) throws IOExce } @Override - protected void masterOperation( + protected void clusterManagerOperation( ClearVotingConfigExclusionsRequest request, ClusterState initialState, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index a15fc15ef6a2b..38198a1b74fb3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -118,14 +118,17 @@ protected ClusterBlockException checkBlock(ClusterHealthRequest request, Cluster } @Override - protected final void masterOperation(ClusterHealthRequest request, ClusterState state, ActionListener listener) - throws Exception { + protected final void clusterManagerOperation( + ClusterHealthRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { logger.warn("attempt to execute a cluster health operation without a task"); throw new UnsupportedOperationException("task parameter is required for this operation"); } @Override - protected void masterOperation( + protected void clusterManagerOperation( final Task task, final ClusterHealthRequest request, final ClusterState unusedState, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index f3c5d51e1907a..a3804db687a2d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -174,7 +174,7 @@ protected CleanupRepositoryResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( CleanupRepositoryRequest request, ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 4e2072d6a8fa3..08e3bc6df0d83 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -95,7 +95,7 @@ protected ClusterBlockException checkBlock(DeleteRepositoryRequest request, Clus } @Override - protected void masterOperation( + protected void clusterManagerOperation( final 
DeleteRepositoryRequest request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index 6e61752c78656..de942ef284f3b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -99,7 +99,7 @@ protected ClusterBlockException checkBlock(GetRepositoriesRequest request, Clust } @Override - protected void masterOperation( + protected void clusterManagerOperation( final GetRepositoriesRequest request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 58ccc4e7ff818..6a5be14df93fd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -95,7 +95,7 @@ protected ClusterBlockException checkBlock(PutRepositoryRequest request, Cluster } @Override - protected void masterOperation( + protected void clusterManagerOperation( final PutRepositoryRequest request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index a673f34058a83..5215078f52d3b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -95,7 +95,7 @@ protected ClusterBlockException checkBlock(VerifyRepositoryRequest request, Clus } @Override - protected void masterOperation( + protected void clusterManagerOperation( final VerifyRepositoryRequest request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 5080ce2c0fd67..3e5ebdd6a17d3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -119,7 +119,7 @@ protected ClusterRerouteResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( final ClusterRerouteRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 47024e98f82b1..ef404375485a2 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -124,7 +124,7 @@ protected ClusterUpdateSettingsResponse read(StreamInput in) throws IOException } @Override - protected void masterOperation( + protected void clusterManagerOperation( final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index ae2d2aeb827ba..2f7c194e0acd7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -108,7 +108,7 @@ protected ClusterSearchShardsResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index f4e90bbbfb961..e9f5153f78700 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -90,7 +90,11 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(CloneSnapshotRequest request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation( + CloneSnapshotRequest request, + ClusterState state, + ActionListener listener + ) { snapshotsService.cloneSnapshot(request, ActionListener.map(listener, v -> new AcknowledgedResponse(true))); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 4b28bafc258cf..ed4af6d915792 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -98,7 +98,7 @@ protected ClusterBlockException checkBlock(CreateSnapshotRequest request, Cluste } @Override - protected void masterOperation( + protected void clusterManagerOperation( final CreateSnapshotRequest request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index d5e18dcd31fc9..c78968c2a0848 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -95,7 +95,7 @@ protected 
ClusterBlockException checkBlock(DeleteSnapshotRequest request, Cluste } @Override - protected void masterOperation( + protected void clusterManagerOperation( final DeleteSnapshotRequest request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 0be3f8be0bc80..d05e62045a1a2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -122,7 +122,7 @@ protected ClusterBlockException checkBlock(GetSnapshotsRequest request, ClusterS } @Override - protected void masterOperation( + protected void clusterManagerOperation( final GetSnapshotsRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index fa7c0c6efa469..e7d95b9e40880 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -102,7 +102,7 @@ protected ClusterBlockException checkBlock(RestoreSnapshotRequest request, Clust } @Override - protected void masterOperation( + protected void clusterManagerOperation( final RestoreSnapshotRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index e73a68e335316..bd7391a7939a1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -136,7 +136,7 @@ protected SnapshotsStatusResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( final SnapshotsStatusRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index a30b6fd580f1a..9d65146716496 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -115,7 +115,7 @@ protected ClusterBlockException checkBlock(ClusterStateRequest request, ClusterS } @Override - protected void masterOperation( + protected void clusterManagerOperation( final ClusterStateRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index 365d7df0f650d..4bc8d836a8200 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -90,8 +90,11 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(DeleteStoredScriptRequest request, ClusterState state, ActionListener listener) - throws Exception { + protected void clusterManagerOperation( + DeleteStoredScriptRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { scriptService.deleteStoredScript(clusterService, request, listener); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index d2d5a49fcde23..8dbadf34ab06b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -89,8 +89,11 @@ protected GetStoredScriptResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(GetStoredScriptRequest request, ClusterState state, ActionListener listener) - throws Exception { + protected void clusterManagerOperation( + GetStoredScriptRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { listener.onResponse(new GetStoredScriptResponse(request.id(), scriptService.getStoredScript(state, request))); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index db76394c45cd5..bb259f173d470 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -90,8 +90,11 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(PutStoredScriptRequest request, ClusterState state, ActionListener listener) - throws Exception { + protected void clusterManagerOperation( + PutStoredScriptRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { scriptService.putStoredScript(clusterService, request, listener); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 8962f0395cc6f..e08a76c5cfc2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -100,7 +100,7 @@ protected ClusterBlockException checkBlock(PendingClusterTasksRequest request, C } @Override - protected void masterOperation( + protected void clusterManagerOperation( PendingClusterTasksRequest request, ClusterState state, ActionListener listener diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index aacd8e25ff827..3e453b42c3d7c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -126,7 +126,7 @@ protected ClusterBlockException checkBlock(IndicesAliasesRequest request, Cluste } @Override - protected void masterOperation( + protected void clusterManagerOperation( final IndicesAliasesRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index a2f975ff9cbbc..fe9c2dbccdf7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -112,7 +112,7 @@ protected GetAliasesResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices; // Switch to a context which will drop any deprecation warnings, because there may be indices resolved here which are not // returned in the final response. We'll add warnings back later if necessary in checkSystemIndexAccess. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java index dc6c94b73065d..c4c789a8de90e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -140,7 +140,7 @@ protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterSta } @Override - protected void masterOperation( + protected void clusterManagerOperation( final CloseIndexRequest request, final ClusterState state, final ActionListener listener @@ -149,7 +149,7 @@ protected void masterOperation( } @Override - protected void masterOperation( + protected void clusterManagerOperation( final Task task, final CloseIndexRequest request, final ClusterState state, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java index fdfeb37ac64a7..73a2996945aff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java @@ -112,7 +112,11 @@ protected CreateIndexResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(CreateIndexRequest request, ClusterState state, ActionListener finalListener) { + protected void clusterManagerOperation( + CreateIndexRequest request, + ClusterState state, + ActionListener finalListener + ) { AtomicReference indexNameRef = new AtomicReference<>(); ActionListener listener = 
ActionListener.wrap(response -> { String indexName = indexNameRef.get(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java index 4e145b9905c95..daba4d0f167f8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -95,7 +95,7 @@ protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterSt } @Override - protected void masterOperation( + protected void clusterManagerOperation( final CreateIndexRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index 9ad2058647592..015a0f6727ab7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -115,7 +115,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( DeleteDanglingIndexRequest deleteRequest, ClusterState state, ActionListener deleteListener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java index 763216912dc05..ddc93dd1fcf6c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java @@ -162,7 +162,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(Request request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { CreateDataStreamClusterStateUpdateRequest updateRequest = new CreateDataStreamClusterStateUpdateRequest( request.name, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java index b9b855d364726..74b0a84782283 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java @@ -192,7 +192,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(Request request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { clusterService.submitStateUpdateTask( "remove-data-stream [" + Strings.arrayToCommaDelimitedString(request.names) + "]", diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java index 6140d10bd293c..61fad265e16e6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java @@ -313,7 +313,7 @@ protected Response read(StreamInput in) throws IOException { } @Override - protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); for (DataStream dataStream : dataStreams) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java index fc79a8be39527..a3d14846338e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -115,7 +115,7 @@ protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterSt } @Override - protected void masterOperation( + protected void clusterManagerOperation( final DeleteIndexRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index a7f73a203f4c5..f5f7e0e9ea7b7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -103,7 +103,7 @@ protected ClusterBlockException checkBlock(IndicesExistsRequest request, Cluster } @Override - protected void masterOperation( + protected void clusterManagerOperation( final IndicesExistsRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java index 0142e70d18221..de272bab332a7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java @@ -98,7 +98,7 @@ protected GetIndexResponse read(StreamInput in) throws IOException { } @Override - protected void doMasterOperation( + protected void doClusterManagerOperation( final GetIndexRequest request, String[] concreteIndices, final ClusterState state, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 71438ad300e0c..e724320728b66 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -88,7 +88,7 @@ protected 
GetMappingsResponse read(StreamInput in) throws IOException { } @Override - protected void doMasterOperation( + protected void doClusterManagerOperation( final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java index e9e8770478853..c4dad614c53dd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java @@ -107,7 +107,7 @@ protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterSta } @Override - protected void masterOperation( + protected void clusterManagerOperation( final PutMappingRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index a6a038fe0708b..de546f428bafa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -119,7 +119,7 @@ protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterSta } @Override - protected void masterOperation( + protected void clusterManagerOperation( final PutMappingRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java index 98816f8677e49..2ccb5f6d22886 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -114,7 +114,7 @@ protected ClusterBlockException checkBlock(OpenIndexRequest request, ClusterStat } @Override - protected void masterOperation( + protected void clusterManagerOperation( final OpenIndexRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java index e7fde50a0f46c..560d2e6389c63 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java @@ -123,13 +123,13 @@ protected ClusterBlockException checkBlock(AddIndexBlockRequest request, Cluster } @Override - protected void masterOperation(AddIndexBlockRequest request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(AddIndexBlockRequest request, ClusterState state, ActionListener listener) throws Exception { throw new UnsupportedOperationException("The task parameter is required"); } @Override - protected void masterOperation( + protected void clusterManagerOperation( final Task task, final AddIndexBlockRequest request, final ClusterState state, diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java index 29bd2e0ab5a1e..4e5e7ec9184fe 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -130,13 +130,13 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState } @Override - protected void masterOperation(RolloverRequest request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(RolloverRequest request, ClusterState state, ActionListener listener) throws Exception { throw new UnsupportedOperationException("The task parameter is required"); } @Override - protected void masterOperation( + protected void clusterManagerOperation( Task task, final RolloverRequest rolloverRequest, final ClusterState state, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index 000d6d70d7af7..cfa75167afa09 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -110,7 +110,7 @@ private static boolean isFilteredRequest(GetSettingsRequest request) { } @Override - protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation(GetSettingsRequest request, ClusterState state, ActionListener listener) { Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); ImmutableOpenMap.Builder indexToSettingsBuilder = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder indexToDefaultSettingsBuilder = ImmutableOpenMap.builder(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index ad893c69a9847..a959fb043e4c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -116,7 +116,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( final UpdateSettingsRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index b2f2c9e5d03a3..cfe682e47f688 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -121,7 +121,7 @@ protected IndicesShardStoresResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( IndicesShardStoresRequest request, 
ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 88af27fa21ea3..ba079aeb03921 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -127,7 +127,7 @@ protected ClusterBlockException checkBlock(ResizeRequest request, ClusterState s } @Override - protected void masterOperation( + protected void clusterManagerOperation( final ResizeRequest resizeRequest, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index 841810ce9e664..75cc8ffe05f73 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -102,7 +102,7 @@ protected ClusterBlockException checkBlock(DeleteComponentTemplateAction.Request } @Override - protected void masterOperation( + protected void clusterManagerOperation( final DeleteComponentTemplateAction.Request request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index 0e2043848aca3..52464dbd90e3f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -102,7 +102,7 @@ protected ClusterBlockException checkBlock(DeleteComposableIndexTemplateAction.R } @Override - protected void masterOperation( + protected void clusterManagerOperation( final DeleteComposableIndexTemplateAction.Request request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 80f008072b024..3d3c2bd0613f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -102,7 +102,7 @@ protected ClusterBlockException checkBlock(DeleteIndexTemplateRequest request, C } @Override - protected void masterOperation( + protected void clusterManagerOperation( final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index c6016ad78a681..36e3a1d0e6264 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -96,7 +96,7 @@ protected ClusterBlockException checkBlock(GetComponentTemplateAction.Request re } @Override - protected void masterOperation( + protected void clusterManagerOperation( GetComponentTemplateAction.Request request, ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index 405dc7afc769f..327a40be64a2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -96,7 +96,7 @@ protected ClusterBlockException checkBlock(GetComposableIndexTemplateAction.Requ } @Override - protected void masterOperation( + protected void clusterManagerOperation( GetComposableIndexTemplateAction.Request request, ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 44969022c9e06..d74ff9e309842 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -96,7 +96,7 @@ protected ClusterBlockException checkBlock(GetIndexTemplatesRequest request, Clu } @Override - protected void masterOperation( + protected void clusterManagerOperation( GetIndexTemplatesRequest request, ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 51a634e876886..ee2a049d5f4a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -127,7 +127,7 @@ protected SimulateIndexTemplateResponse read(StreamInput in) throws IOException } @Override - protected void masterOperation( + protected void clusterManagerOperation( SimulateIndexTemplateRequest request, ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index 5b7395b3bc3a1..71ded5687ac72 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -113,7 +113,7 @@ protected SimulateIndexTemplateResponse read(StreamInput in) throws IOException } @Override - protected void masterOperation( + protected void 
clusterManagerOperation( SimulateTemplateAction.Request request, ClusterState state, ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java index 30f196142515b..925913c4e8d3e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java @@ -106,7 +106,7 @@ protected ClusterBlockException checkBlock(PutComponentTemplateAction.Request re } @Override - protected void masterOperation( + protected void clusterManagerOperation( final PutComponentTemplateAction.Request request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index d8bda8839aa42..20ba5376f1add 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -99,7 +99,7 @@ protected ClusterBlockException checkBlock(PutComposableIndexTemplateAction.Requ } @Override - protected void masterOperation( + protected void clusterManagerOperation( final PutComposableIndexTemplateAction.Request request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 8e8bcbab6f2b7..c93d83cd5e70d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -106,7 +106,7 @@ protected ClusterBlockException checkBlock(PutIndexTemplateRequest request, Clus } @Override - protected void masterOperation( + protected void clusterManagerOperation( final PutIndexTemplateRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index f27edbb8a4b62..df0d5cf57e7de 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -102,7 +102,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation( + protected void clusterManagerOperation( final UpgradeSettingsRequest request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java index 590badeb54756..9085b2347765c 100644 --- 
a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java @@ -88,7 +88,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(DeletePipelineRequest request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(DeletePipelineRequest request, ClusterState state, ActionListener listener) throws Exception { ingestService.delete(request, listener); } diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java index 3a5493bfa4b36..5a59c8255361e 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java @@ -85,7 +85,7 @@ protected GetPipelineResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(GetPipelineRequest request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(GetPipelineRequest request, ClusterState state, ActionListener listener) throws Exception { listener.onResponse(new GetPipelineResponse(IngestService.getPipelines(state, request.getIds()))); } diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java index 448aa2855c3aa..61a2deedfd511 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java @@ -103,7 +103,7 @@ protected AcknowledgedResponse read(StreamInput in) throws IOException { } @Override - protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(PutPipelineRequest request, ClusterState state, ActionListener listener) throws Exception { NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear().addMetric(NodesInfoRequest.Metric.INGEST.metricName()); diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index a30189a0899a4..d65ba3eddf776 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -117,13 +117,30 @@ protected TransportClusterManagerNodeAction( protected abstract Response read(StreamInput in) throws IOException; - protected abstract void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception; + /** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ClusterManagerNodeRequest, ClusterState, ActionListener)} + */ + @Deprecated + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + throw new UnsupportedOperationException("Must be overridden"); + } + + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + 
masterOperation(request, state, listener); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(Task, ClusterManagerNodeRequest, ClusterState, ActionListener)} */ + @Deprecated + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { + clusterManagerOperation(task, request, state, listener); + } /** * Override this operation if access to the task parameter is needed */ - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { - masterOperation(request, state, listener); + protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception { + clusterManagerOperation(request, state, listener); } protected boolean localExecute(Request request) { @@ -201,7 +218,7 @@ protected void doStart(ClusterState clusterState) { } }); threadPool.executor(executor) - .execute(ActionRunnable.wrap(delegate, l -> masterOperation(task, request, clusterState, l))); + .execute(ActionRunnable.wrap(delegate, l -> clusterManagerOperation(task, request, clusterState, l))); } } else { if (nodes.getClusterManagerNode() == null) { @@ -304,4 +321,5 @@ protected String getClusterManagerActionName(DiscoveryNode node) { protected String getMasterActionName(DiscoveryNode node) { return getClusterManagerActionName(node); } + } diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index caf89fc7b6c8e..1411ff7b30695 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -76,16 +76,33 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) .indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); } - @Override + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ClusterInfoRequest, ClusterState, ActionListener)} */ + @Deprecated protected final void masterOperation(final Request request, final ClusterState state, final ActionListener listener) { + clusterManagerOperation(request, state, listener); + } + + @Override + protected final void clusterManagerOperation(final Request request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); - doMasterOperation(request, concreteIndices, state, listener); + doClusterManagerOperation(request, concreteIndices, state, listener); } - protected abstract void doMasterOperation( + protected void doClusterManagerOperation( Request request, String[] concreteIndices, ClusterState state, ActionListener listener - ); + ) { + doMasterOperation(request, concreteIndices, state, listener); + } + + /** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #doClusterManagerOperation(ClusterInfoRequest, String[], ClusterState, ActionListener)} + */ + @Deprecated + protected void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, ActionListener listener) { + throw new UnsupportedOperationException("Must be 
overridden"); + } + } diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java index c26fa5c343b5c..2524e839ba11e 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java @@ -32,11 +32,9 @@ package org.opensearch.action.support.master; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.stream.Writeable; @@ -87,7 +85,4 @@ protected TransportMasterNodeAction( ); } - @Deprecated - protected abstract void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception; - } diff --git a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java index 974fad445be9e..26d31b874f2c0 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java @@ -31,10 +31,8 @@ package org.opensearch.action.support.master.info; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; -import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.stream.Writeable; @@ -61,11 +59,4 @@ public TransportClusterInfoAction( super(actionName, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } - @Deprecated - protected abstract void doMasterOperation( - Request request, - String[] concreteIndices, - ClusterState state, - ActionListener listener - ); } diff --git a/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java index 1dda269a68491..d036457ccae89 100644 --- a/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java @@ -193,7 +193,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } @Override - protected final void masterOperation( + protected final void clusterManagerOperation( final Request request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java index 56436d0e1aa0c..d07dcc23056d8 100644 --- a/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java @@ -181,7 +181,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } @Override - 
protected final void masterOperation( + protected final void clusterManagerOperation( final Request request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java index 6f2f3a565427b..4864cf3c23b50 100644 --- a/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java @@ -254,7 +254,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } @Override - protected final void masterOperation( + protected final void clusterManagerOperation( final Request request, ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java index aee79ac1ba3ea..acbb62373ab60 100644 --- a/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java @@ -213,7 +213,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } @Override - protected final void masterOperation( + protected final void clusterManagerOperation( final Request request, final ClusterState state, final ActionListener listener diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 024fb6ec53089..0aa967f87be9b 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -3654,7 +3654,7 @@ protected UpdateIndexShardSnapshotStatusResponse read(StreamInput in) throws IOE } @Override - protected void masterOperation( + protected void clusterManagerOperation( UpdateIndexShardSnapshotStatusRequest request, ClusterState state, ActionListener listener diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java index 5648f14fa69dd..001efb32c2988 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java @@ -145,14 +145,14 @@ class TestTransportGetIndexAction extends TransportGetIndexAction { } @Override - protected void doMasterOperation( + protected void doClusterManagerOperation( GetIndexRequest request, String[] concreteIndices, ClusterState state, ActionListener listener ) { ClusterState stateWithIndex = ClusterStateCreationUtils.state(indexName, 1, 1); - super.doMasterOperation(request, concreteIndices, stateWithIndex, listener); + super.doClusterManagerOperation(request, concreteIndices, stateWithIndex, listener); } } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 8ef8de81041e3..b206c2e19a65b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -298,7 +298,7 
@@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr RolloverRequest rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003"); rolloverRequest.addMaxIndexDocsCondition(500L); rolloverRequest.dryRun(true); - transportRolloverAction.masterOperation(mock(Task.class), rolloverRequest, stateBefore, future); + transportRolloverAction.clusterManagerOperation(mock(Task.class), rolloverRequest, stateBefore, future); RolloverResponse response = future.actionGet(); assertThat(response.getOldIndex(), equalTo("logs-index-000002")); @@ -314,7 +314,7 @@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003"); rolloverRequest.addMaxIndexDocsCondition(300L); rolloverRequest.dryRun(true); - transportRolloverAction.masterOperation(mock(Task.class), rolloverRequest, stateBefore, future); + transportRolloverAction.clusterManagerOperation(mock(Task.class), rolloverRequest, stateBefore, future); response = future.actionGet(); assertThat(response.getOldIndex(), equalTo("logs-index-000002")); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java index cefb59c40949c..dfbc1dccb9d6a 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java @@ -84,9 +84,13 @@ class TestTransportGetSettingsAction extends TransportGetSettingsAction { } @Override - protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation( + GetSettingsRequest request, + ClusterState state, + ActionListener listener + ) { ClusterState stateWithIndex = ClusterStateCreationUtils.state(indexName, 1, 1); - super.masterOperation(request, stateWithIndex, listener); + super.clusterManagerOperation(request, stateWithIndex, listener); } } diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index f0b62914213c1..9ec9e656257d6 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -229,7 +229,7 @@ protected Response read(StreamInput in) throws IOException { } @Override - protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { listener.onResponse(new Response()); // default implementation, overridden in specific tests } @@ -252,7 +252,7 @@ public void testLocalOperationWithoutBlocks() throws ExecutionException, Interru new Action("internal:testAction", transportService, clusterService, threadPool) { @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) { if (clusterManagerOperationFailure) { 
listener.onFailure(exception); } else { @@ -511,7 +511,8 @@ public void testClusterManagerFailoverAfterStepDown() throws ExecutionException, new Action("internal:testAction", transportService, clusterService, threadPool) { @Override - protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) + throws Exception { // The other node has become cluster-manager, simulate failures of this node while publishing cluster state through // ZenDiscovery setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java index 3927cd1d13040..b9abddc5622c9 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java @@ -39,7 +39,7 @@ public class TransportMasterNodeActionUtils { /** - * Allows to directly call {@link TransportClusterManagerNodeAction#masterOperation(ClusterManagerNodeRequest, ClusterState, ActionListener)} which is + * Allows to directly call {@link TransportClusterManagerNodeAction#clusterManagerOperation(ClusterManagerNodeRequest, ClusterState, ActionListener)} which is * a protected method. */ public static , Response extends ActionResponse> void runClusterManagerOperation( @@ -49,6 +49,6 @@ public static , Response exte ActionListener actionListener ) throws Exception { assert clusterManagerNodeAction.checkBlock(request, clusterState) == null; - clusterManagerNodeAction.masterOperation(request, clusterState, actionListener); + clusterManagerNodeAction.clusterManagerOperation(request, clusterState, actionListener); } } diff --git a/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java b/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java index 775b4bb185881..2e244908dc4eb 100644 --- a/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java +++ b/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -173,7 +173,7 @@ protected UpdateInternalOrPrivateAction.Response read(StreamInput in) throws IOE } @Override - protected void masterOperation( + protected void clusterManagerOperation( final UpdateInternalOrPrivateAction.Request request, final ClusterState state, final ActionListener listener diff --git a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java index 33147447bd469..e84bde1a064a8 100644 --- a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java @@ -190,7 +190,7 @@ public int numDataNodes() { } @Override - public int numDataAndMasterNodes() { + public int numDataAndClusterManagerNodes() { return numClusterManagerAndDataNodes; } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index c68e1eac3e1d8..d5925c03db480 100644 --- 
a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -2348,7 +2348,7 @@ public int numDataNodes() {
     }

     @Override
-    public int numDataAndMasterNodes() {
+    public int numDataAndClusterManagerNodes() {
        return filterNodes(nodes, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE)).size();
    }
diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java
index 26081d947431d..c2e90f0369e6c 100644
--- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java
@@ -127,7 +127,18 @@ public void assertAfterTest() throws Exception {
     /**
      * Returns the number of data and cluster-manager eligible nodes in the cluster.
      */
-    public abstract int numDataAndMasterNodes();
+    public int numDataAndClusterManagerNodes() {
+        return numDataAndMasterNodes();
+    }
+
+    /**
+     * Returns the number of data and cluster-manager eligible nodes in the cluster.
+     * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #numDataAndClusterManagerNodes()}
+     */
+    @Deprecated
+    public int numDataAndMasterNodes() {
+        throw new UnsupportedOperationException("Must be overridden");
+    }

     /**
      * Returns the http addresses of the nodes within the cluster.

From 740f75d2051f27cffbbbfe2b601a85db9b30328c Mon Sep 17 00:00:00 2001
From: Tianli Feng
Date: Fri, 29 Jul 2022 20:36:51 -0700
Subject: [PATCH 045/104] Deprecate class 'MasterService' and create alternative class 'ClusterManagerService' (#4022)

To support inclusive language, the `master` terminology is going to be replaced by `cluster manager` in the code base.

- Deprecate class `MasterService` and create the alternative class `ClusterManagerService`.
- Add a unit test to validate that the method `ClusterService.getMasterService()` can still return an object of type `MasterService`.
- Rename all existing references from `MasterService` to `ClusterManagerService`, and rename the corresponding local variables.
- Deprecate the public methods `ClusterServiceUtils.createMasterService(...)` and create the alternative methods `createClusterManagerService(...)`.

Note: The class `ClusterManagerService` is a subclass of `MasterService`; this inheritance relationship is the opposite of that in most of the other renamed classes with `Master` in the name (covered by issue https://github.com/opensearch-project/OpenSearch/issues/1684). The reason is that there is a public method whose return value is of type `MasterService`:
https://github.com/opensearch-project/OpenSearch/blob/388c80ad94529b1d9aad0a735c4740dce2932a32/server/src/main/java/org/opensearch/cluster/service/ClusterService.java#L221
and the Performance Analyzer plugin declares a local variable of type `MasterService`:
https://github.com/opensearch-project/performance-analyzer/blob/5ee4809ac1cda6517ed871aeb12c6635203e7f1d/src/main/java/org/opensearch/performanceanalyzer/collectors/MasterServiceEventMetrics.java#L219
If the old class `MasterService` were made a subclass of the new class `ClusterManagerService`, both of those usages would break. By reversing the inheritance relationship, I can keep the method `getMasterService()` backwards compatible while deprecating the class `MasterService` and encouraging use of the new class `ClusterManagerService`.
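To make the compatibility argument above concrete, here is a minimal sketch of the chosen inheritance direction. These are illustrative stubs only, with bodies, generics, and construction details elided; they are not the actual OpenSearch sources:

    // Illustrative stubs only; not the real OpenSearch classes.
    class MasterService {}                               // legacy name, now deprecated

    class ClusterManagerService extends MasterService {} // new, preferred name

    class ClusterService {
        private final ClusterManagerService clusterManagerService = new ClusterManagerService();

        // New accessor for new code.
        ClusterManagerService getClusterManagerService() {
            return clusterManagerService;
        }

        // The old accessor keeps its MasterService return type, so a
        // pre-existing caller such as
        //     MasterService ms = clusterService.getMasterService();
        // still compiles and runs: the subclass instance widens implicitly
        // to its supertype.
        @Deprecated
        MasterService getMasterService() {
            return clusterManagerService;
        }
    }

With the direction reversed (`MasterService extends ClusterManagerService`), the single underlying service instance would no longer be a `MasterService`, so `getMasterService()` could not hand it back under its declared return type.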
Signed-off-by: Tianli Feng
---
 .../cluster/coordination/ZenDiscoveryIT.java  |  4 +-
 .../cluster/service/ClusterServiceIT.java     |  6 +-
 .../health/TransportClusterHealthAction.java  |  8 +-
 .../TransportPendingClusterTasksAction.java   |  2 +-
 .../cluster/ClusterStateTaskListener.java     |  6 +-
 .../cluster/coordination/Coordinator.java     | 18 ++--
 .../cluster/coordination/JoinHelper.java      | 12 +--
 .../service/ClusterManagerService.java        | 24 +++++
 .../cluster/service/ClusterService.java       | 26 +++--
 .../cluster/service/MasterService.java        |  2 +
 .../common/settings/ClusterSettings.java      |  6 +-
 .../common/util/concurrent/BaseFuture.java    |  4 +-
 .../opensearch/discovery/DiscoveryModule.java |  6 +-
 .../main/java/org/opensearch/node/Node.java   |  4 +-
 ...tAddVotingConfigExclusionsActionTests.java |  2 +-
 ...rnalClusterInfoServiceSchedulingTests.java | 10 +-
 .../cluster/coordination/NodeJoinTests.java   | 32 +++----
 .../routing/BatchedRerouteServiceTests.java   |  2 +-
 .../cluster/service/ClusterServiceTests.java  | 40 ++++++++
 .../cluster/service/MasterServiceTests.java   | 94 +++++++++----------
 .../discovery/DiscoveryModuleTests.java       |  8 +-
 .../snapshots/SnapshotResiliencyTests.java    | 17 ++--
 .../AbstractCoordinatorTestCase.java          | 16 ++--
 .../service/FakeThreadPoolMasterService.java  |  2 +-
 .../opensearch/test/ClusterServiceUtils.java  | 36 ++++---
 .../BlockMasterServiceOnMaster.java           |  2 +-
 .../BusyMasterServiceDisruption.java          |  2 +-
 .../FakeThreadPoolMasterServiceTests.java     | 12 +--
 28 files changed, 248 insertions(+), 155 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
 create mode 100644 server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java

diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java
index 66ff9eb15ae0e..4f47f2ecefd16 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java
@@ -170,7 +170,9 @@ public void testDiscoveryStats() throws Exception {
         ensureGreen(); // ensures that all events are processed (in particular state recovery fully completed)
         assertBusy(
             () -> assertThat(
-                internalCluster().clusterService(internalCluster().getClusterManagerName()).getMasterService().numberOfPendingTasks(),
+                internalCluster().clusterService(internalCluster().getClusterManagerName())
+                    .getClusterManagerService()
+                    .numberOfPendingTasks(),
                 equalTo(0)
             )
         ); // see https://github.com/elastic/elasticsearch/issues/24388
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/service/ClusterServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/service/ClusterServiceIT.java
index 252053f4968b4..7d9ffb23a2cf7 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/service/ClusterServiceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/service/ClusterServiceIT.java
@@ -391,7 +391,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
         // The tasks can be re-ordered, so we need to check out-of-order
         Set controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
-        List pendingClusterTasks = clusterService.getMasterService().pendingTasks();
+        List pendingClusterTasks = clusterService.getClusterManagerService().pendingTasks();
assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10)); assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1")); assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true)); @@ -413,7 +413,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS invoked2.await(); // whenever we test for no tasks, we need to wait since this is a live node - assertBusy(() -> assertTrue("Pending tasks not empty", clusterService.getMasterService().pendingTasks().isEmpty())); + assertBusy(() -> assertTrue("Pending tasks not empty", clusterService.getClusterManagerService().pendingTasks().isEmpty())); waitNoPendingTasksOnAll(); final CountDownLatch block2 = new CountDownLatch(1); @@ -453,7 +453,7 @@ public void onFailure(String source, Exception e) { } Thread.sleep(100); - pendingClusterTasks = clusterService.getMasterService().pendingTasks(); + pendingClusterTasks = clusterService.getClusterManagerService().pendingTasks(); assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5)); controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); for (PendingClusterTask task : pendingClusterTasks) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 38198a1b74fb3..d6b9f5f40f32c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -319,9 +319,9 @@ private boolean validateRequest(final ClusterHealthRequest request, ClusterState ClusterHealthResponse response = clusterHealth( request, clusterState, - clusterService.getMasterService().numberOfPendingTasks(), + clusterService.getClusterManagerService().numberOfPendingTasks(), allocationService.getNumberOfInFlightFetches(), - clusterService.getMasterService().getMaxTaskWaitTime() + clusterService.getClusterManagerService().getMaxTaskWaitTime() ); return prepareResponse(request, response, clusterState, indexNameExpressionResolver) == waitCount; } @@ -341,9 +341,9 @@ private ClusterHealthResponse getResponse( ClusterHealthResponse response = clusterHealth( request, clusterState, - clusterService.getMasterService().numberOfPendingTasks(), + clusterService.getClusterManagerService().numberOfPendingTasks(), allocationService.getNumberOfInFlightFetches(), - clusterService.getMasterService().getMaxTaskWaitTime() + clusterService.getClusterManagerService().getMaxTaskWaitTime() ); int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); boolean valid = (readyCounter == waitFor); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index e08a76c5cfc2a..abe93fc45125a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -106,7 +106,7 @@ protected void clusterManagerOperation( ActionListener listener ) { logger.trace("fetching pending tasks from cluster service"); - final List pendingTasks = clusterService.getMasterService().pendingTasks(); + final List pendingTasks = 
clusterService.getClusterManagerService().pendingTasks(); logger.trace("done fetching pending tasks from cluster service"); listener.onResponse(new PendingClusterTasksResponse(pendingTasks)); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 37108d356d86a..d6c4abfad7b8d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -31,7 +31,7 @@ package org.opensearch.cluster; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import java.util.List; @@ -49,7 +49,7 @@ public interface ClusterStateTaskListener { /** * called when the task was rejected because the local node is no longer cluster-manager. - * Used only for tasks submitted to {@link MasterService}. + * Used only for tasks submitted to {@link ClusterManagerService}. */ default void onNoLongerClusterManager(String source) { onFailure(source, new NotClusterManagerException("no longer cluster-manager. source: [" + source + "]")); @@ -57,7 +57,7 @@ default void onNoLongerClusterManager(String source) { /** * called when the task was rejected because the local node is no longer cluster-manager. - * Used only for tasks submitted to {@link MasterService}. + * Used only for tasks submitted to {@link ClusterManagerService}. * * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #onNoLongerClusterManager(String)} */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 36dd4aba104b2..1c7e7cd0419e2 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -58,7 +58,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -141,7 +141,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final boolean singleNodeDiscovery; private final ElectionStrategy electionStrategy; private final TransportService transportService; - private final MasterService masterService; + private final ClusterManagerService clusterManagerService; private final AllocationService allocationService; private final JoinHelper joinHelper; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; @@ -191,7 +191,7 @@ public Coordinator( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, - MasterService masterService, + ClusterManagerService clusterManagerService, Supplier persistedStateSupplier, SeedHostsProvider seedHostsProvider, ClusterApplier clusterApplier, @@ -203,7 +203,7 @@ public Coordinator( ) { this.settings = settings; this.transportService = transportService; - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.allocationService = allocationService; this.onJoinValidators = 
JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); @@ -211,7 +211,7 @@ public Coordinator( this.joinHelper = new JoinHelper( settings, allocationService, - masterService, + clusterManagerService, transportService, this::getCurrentTerm, this::getStateForClusterManagerService, @@ -260,7 +260,7 @@ public Coordinator( ); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); this.clusterApplier = clusterApplier; - masterService.setClusterStateSupplier(this::getStateForClusterManagerService); + clusterManagerService.setClusterStateSupplier(this::getStateForClusterManagerService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); this.clusterBootstrapService = new ClusterBootstrapService( settings, @@ -310,7 +310,7 @@ private void onLeaderFailure(Exception e) { private void removeNode(DiscoveryNode discoveryNode, String reason) { synchronized (mutex) { if (mode == Mode.LEADER) { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( "node-left", new NodeRemovalClusterStateTaskExecutor.Task(discoveryNode, reason), ClusterStateTaskConfig.build(Priority.IMMEDIATE), @@ -757,7 +757,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { } private void cleanClusterManagerService() { - masterService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { + clusterManagerService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() { @Override public void onFailure(String source, Exception e) { // ignore @@ -1129,7 +1129,7 @@ private void scheduleReconfigurationIfNeeded() { final ClusterState state = getLastAcceptedState(); if (improveConfiguration(state) != state && reconfigurationTaskScheduled.compareAndSet(false, true)) { logger.trace("scheduling reconfiguration"); - masterService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { + clusterManagerService.submitStateUpdateTask("reconfigure", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { reconfigurationTaskScheduled.set(false); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 2eae6411eff69..656e6d220720f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -46,7 +46,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Priority; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.StreamInput; @@ -106,7 +106,7 @@ public class JoinHelper { Setting.Property.Deprecated ); - private final MasterService masterService; + private final ClusterManagerService clusterManagerService; private final TransportService transportService; private volatile JoinTaskExecutor joinTaskExecutor; @@ -122,7 +122,7 @@ public class JoinHelper { JoinHelper( Settings settings, AllocationService allocationService, - MasterService masterService, + ClusterManagerService 
clusterManagerService, TransportService transportService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, @@ -132,7 +132,7 @@ public class JoinHelper { RerouteService rerouteService, NodeHealthService nodeHealthService ) { - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); @@ -458,7 +458,7 @@ class LeaderJoinAccumulator implements JoinAccumulator { public void handleJoinRequest(DiscoveryNode sender, JoinCallback joinCallback) { final JoinTaskExecutor.Task task = new JoinTaskExecutor.Task(sender, "join existing leader"); assert joinTaskExecutor != null; - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( "node-join", task, ClusterStateTaskConfig.build(Priority.URGENT), @@ -543,7 +543,7 @@ public void close(Mode newMode) { pendingAsTasks.put(JoinTaskExecutor.newBecomeClusterManagerTask(), (source, e) -> {}); pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {}); joinTaskExecutor = joinTaskExecutorGenerator.get(); - masterService.submitStateUpdateTasks( + clusterManagerService.submitStateUpdateTasks( stateUpdateSource, pendingAsTasks, ClusterStateTaskConfig.build(Priority.URGENT), diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java new file mode 100644 index 0000000000000..74b623dd95e6f --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.service; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.threadpool.ThreadPool; + +/** + * Main Cluster Manager Node Service + * + * @opensearch.internal + */ +public class ClusterManagerService extends MasterService { + public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + super(settings, clusterSettings, threadPool); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 17ad8412de525..d393613118af8 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -63,7 +63,7 @@ * @opensearch.internal */ public class ClusterService extends AbstractLifecycleComponent { - private final MasterService masterService; + private final ClusterManagerService clusterManagerService; private final ClusterApplierService clusterApplierService; @@ -93,7 +93,7 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread this( settings, clusterSettings, - new MasterService(settings, clusterSettings, threadPool), + new ClusterManagerService(settings, clusterSettings, threadPool), new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool) ); } @@ -101,12 +101,12 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread public ClusterService( Settings settings, ClusterSettings clusterSettings, - MasterService masterService, + ClusterManagerService clusterManagerService, ClusterApplierService clusterApplierService ) { this.settings = settings; this.nodeName = Node.NODE_NAME_SETTING.get(settings); - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; this.operationRouting = new OperationRouting(settings, clusterSettings); this.clusterSettings = clusterSettings; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -132,18 +132,18 @@ public RerouteService getRerouteService() { @Override protected synchronized void doStart() { clusterApplierService.start(); - masterService.start(); + clusterManagerService.start(); } @Override protected synchronized void doStop() { - masterService.stop(); + clusterManagerService.stop(); clusterApplierService.stop(); } @Override protected synchronized void doClose() { - masterService.close(); + clusterManagerService.close(); clusterApplierService.close(); } @@ -228,8 +228,14 @@ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { addLocalNodeClusterManagerListener(listener); } + public ClusterManagerService getClusterManagerService() { + return clusterManagerService; + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerService()} */ + @Deprecated public MasterService getMasterService() { - return masterService; + return clusterManagerService; } /** @@ -252,7 +258,7 @@ public ClusterApplierService getClusterApplierService() { public static boolean assertClusterOrClusterManagerStateThread() { assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) - || Thread.currentThread().getName().contains(MasterService.CLUSTER_MANAGER_UPDATE_THREAD_NAME) + || 
Thread.currentThread().getName().contains(ClusterManagerService.CLUSTER_MANAGER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread"; return true; } @@ -349,6 +355,6 @@ public void submitStateUpdateTasks( final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor ) { - masterService.submitStateUpdateTasks(source, tasks, config, executor); + clusterManagerService.submitStateUpdateTasks(source, tasks, config, executor); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index e20d93e76ef01..b78707e994855 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -87,7 +87,9 @@ * Main Master Node Service * * @opensearch.internal + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerService}. */ +@Deprecated public class MasterService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(MasterService.class); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 600c42d036fa1..bee3428188026 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -83,7 +83,7 @@ import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.logging.Loggers; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkService; @@ -337,8 +337,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndexModule.NODE_STORE_ALLOW_MMAP, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java index f394809aaacf7..47fc4fc33bd74 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BaseFuture.java @@ -33,7 +33,7 @@ package org.opensearch.common.util.concurrent; import org.opensearch.cluster.service.ClusterApplierService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transports; @@ -109,7 +109,7 @@ protected boolean blockingAllowed() { return 
Transports.assertNotTransportThread(BLOCKING_OP_REASON) && ThreadPool.assertNotScheduleThread(BLOCKING_OP_REASON) && ClusterApplierService.assertNotClusterStateUpdateThread(BLOCKING_OP_REASON) - && MasterService.assertNotClusterManagerUpdateThread(BLOCKING_OP_REASON); + && ClusterManagerService.assertNotClusterManagerUpdateThread(BLOCKING_OP_REASON); } @Override diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 6b746e5963bdc..44f44fa055b2b 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplier; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Randomness; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; @@ -121,7 +121,7 @@ public DiscoveryModule( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, - MasterService masterService, + ClusterManagerService clusterManagerService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, @@ -197,7 +197,7 @@ public DiscoveryModule( transportService, namedWriteableRegistry, allocationService, - masterService, + clusterManagerService, gatewayMetaState::getPersistedState, seedHostsProvider, clusterApplier, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 7e90d82c4b663..bc5f17759d498 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -833,7 +833,7 @@ protected Node( transportService, namedWriteableRegistry, networkService, - clusterService.getMasterService(), + clusterService.getClusterManagerService(), clusterService.getClusterApplierService(), clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), @@ -1089,7 +1089,7 @@ public Node start() throws NodeValidationException { injector.getInstance(GatewayService.class).start(); Discovery discovery = injector.getInstance(Discovery.class); - clusterService.getMasterService().setClusterStatePublisher(discovery::publish); + clusterService.getClusterManagerService().setClusterStatePublisher(discovery::publish); // Start the transport service now so the publish address will be added to the local disco node in ClusterService TransportService transportService = injector.getInstance(TransportService.class); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index 0c728c8314517..69570fa7b4640 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -692,7 +692,7 @@ private static class AdjustConfigurationForExclusions implements Listener { @Override public void onNewClusterState(ClusterState state) { 
- clusterService.getMasterService().submitStateUpdateTask("reconfiguration", new ClusterStateUpdateTask() { + clusterService.getClusterManagerService().submitStateUpdateTask("reconfiguration", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { assertThat(currentState, sameInstance(state)); diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 7a073456af1ba..bb0d07f2c6032 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -49,7 +49,7 @@ import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.cluster.service.FakeThreadPoolMasterService; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; @@ -89,14 +89,14 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { } }; - final MasterService masterService = new FakeThreadPoolMasterService( + final ClusterManagerService clusterManagerService = new FakeThreadPoolMasterService( "test", "clusterManagerService", threadPool, r -> { fail("cluster-manager service should not run any tasks"); } ); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); + final ClusterService clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); final FakeClusterInfoServiceClient client = new FakeClusterInfoServiceClient(threadPool); final InternalClusterInfoService clusterInfoService = new InternalClusterInfoService(settings, clusterService, threadPool, client); @@ -105,8 +105,8 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService()); clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build()); - masterService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); - masterService.setClusterStateSupplier(clusterApplierService::state); + clusterManagerService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); + clusterManagerService.setClusterStateSupplier(clusterApplierService::state); clusterService.start(); final AtomicBoolean becameClusterManager1 = new AtomicBoolean(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 8e81a3043426f..65a0867ccd26b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -44,7 +44,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.FakeThreadPoolMasterService; -import 
org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.MasterServiceTests; import org.opensearch.common.Randomness; import org.opensearch.common.settings.ClusterSettings; @@ -98,7 +98,7 @@ public class NodeJoinTests extends OpenSearchTestCase { private static ThreadPool threadPool; - private MasterService masterService; + private ClusterManagerService clusterManagerService; private Coordinator coordinator; private DeterministicTaskQueue deterministicTaskQueue; private Transport transport; @@ -117,7 +117,7 @@ public static void afterClass() { @After public void tearDown() throws Exception { super.tearDown(); - masterService.close(); + clusterManagerService.close(); } private static ClusterState initialState(DiscoveryNode localNode, long term, long version, VotingConfiguration config) { @@ -166,40 +166,40 @@ private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterStat } private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterState initialState) { - MasterService masterService = new MasterService( + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); AtomicReference clusterStateRef = new AtomicReference<>(initialState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); setupClusterManagerServiceAndCoordinator( term, initialState, - masterService, + clusterManagerService, threadPool, new Random(Randomness.get().nextLong()), () -> new StatusInfo(HEALTHY, "healthy-info") ); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); } private void setupClusterManagerServiceAndCoordinator( long term, ClusterState initialState, - MasterService masterService, + ClusterManagerService clusterManagerService, ThreadPool threadPool, Random random, NodeHealthService nodeHealthService ) { - if (this.masterService != null || coordinator != null) { + if (this.clusterManagerService != null || coordinator != null) { throw new IllegalStateException("method setupClusterManagerServiceAndCoordinator can only be called once"); } - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; CapturingTransport capturingTransport = new CapturingTransport() { @Override protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) { @@ -231,7 +231,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req transportService, writableRegistry(), OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY), - masterService, + clusterManagerService, () -> new InMemoryPersistedState(term, initialState), r -> emptyList(), new NoOpClusterApplier(), @@ -514,7 +514,7 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ); assertTrue( - MasterServiceTests.discoveryState(masterService) + MasterServiceTests.discoveryState(clusterManagerService) .getVotingConfigExclusions() .stream() .anyMatch( @@ -746,7 +746,7 @@ public void testConcurrentJoining() { throw 
new RuntimeException(e); } - assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster()); + assertTrue(MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); for (DiscoveryNode successfulNode : successfulNodes) { assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode)); @@ -776,10 +776,10 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { } private boolean isLocalNodeElectedMaster() { - return MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster(); + return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } private boolean clusterStateHasNode(DiscoveryNode node) { - return node.equals(MasterServiceTests.discoveryState(masterService).nodes().get(node.getId())); + return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java index 241ee3fca3553..796d73aa715de 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java @@ -223,7 +223,7 @@ public void testNotifiesOnFailure() throws InterruptedException { } })); if (rarely()) { - clusterService.getMasterService() + clusterService.getClusterManagerService() .setClusterStatePublisher( randomBoolean() ? ClusterServiceUtils.createClusterStatePublisher(clusterService.getClusterApplierService()) diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java new file mode 100644 index 0000000000000..ed1131a898ad9 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.service; + +import org.junit.After; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; + +import static org.hamcrest.Matchers.equalTo; + +public class ClusterServiceTests extends OpenSearchTestCase { + private final TestThreadPool threadPool = new TestThreadPool(ClusterServiceTests.class.getName()); + + @After + public void terminateThreadPool() { + terminate(threadPool); + } + + public void testDeprecatedGetMasterServiceBWC() { + try ( + ClusterService clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ) + ) { + MasterService masterService = clusterService.getMasterService(); + ClusterManagerService clusterManagerService = clusterService.getClusterManagerService(); + assertThat(masterService, equalTo(clusterManagerService)); + } + } +} diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index e39520ce72568..a397f295bcaf2 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -122,9 +122,9 @@ public void randomizeCurrentTime() { relativeTimeInMillis = randomLongBetween(0L, 1L << 62); } - private MasterService createClusterManagerService(boolean makeClusterManager) { + private ClusterManagerService createClusterManagerService(boolean makeClusterManager) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - final MasterService masterService = new MasterService( + final ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -142,17 +142,17 @@ private MasterService createClusterManagerService(boolean makeClusterManager) { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); - return masterService; + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); + return clusterManagerService; } public void testClusterManagerAwareExecution() throws Exception { - final MasterService nonClusterManager = createClusterManagerService(false); + final ClusterManagerService nonClusterManager = createClusterManagerService(false); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); @@ -195,7 +195,7 @@ public void onFailure(String source, Exception e) { } public void testThreadContext() throws InterruptedException { - final MasterService masterService = createClusterManagerService(true); + final ClusterManagerService clusterManagerService = createClusterManagerService(true); final CountDownLatch latch = new CountDownLatch(1); try 
(ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { @@ -209,7 +209,7 @@ public void testThreadContext() throws InterruptedException { final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); final TimeValue clusterManagerTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); - masterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { assertTrue(threadPool.getThreadContext().isSystemContext()); @@ -281,7 +281,7 @@ public void onAckTimeout() { latch.await(); - masterService.close(); + clusterManagerService.close(); } /* @@ -293,8 +293,8 @@ public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws Interru final CountDownLatch latch = new CountDownLatch(1); AtomicBoolean published = new AtomicBoolean(); - try (MasterService masterService = createClusterManagerService(true)) { - masterService.submitStateUpdateTask( + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask( "testClusterStateTaskListenerThrowingExceptionIsOkay", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -422,8 +422,8 @@ public void testClusterStateUpdateLogging() throws Exception { ) ); - try (MasterService masterService = createClusterManagerService(true)) { - masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis(); @@ -438,7 +438,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis(); @@ -453,7 +453,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) {} }); - masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis(); @@ -470,7 +470,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -618,7 +618,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }; - try (MasterService masterService = createClusterManagerService(true)) { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { final ConcurrentMap submittedTasksPerThread = new ConcurrentHashMap<>(); CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for 
(int i = 0; i < numberOfThreads; i++) { @@ -633,7 +633,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size()); final TaskExecutor executor = assignment.v1(); if (tasks.size() == 1) { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( threadName, tasks.stream().findFirst().get(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -643,7 +643,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } else { Map taskListeners = new HashMap<>(); tasks.forEach(t -> taskListeners.put(t, listener)); - masterService.submitStateUpdateTasks( + clusterManagerService.submitStateUpdateTasks( threadName, taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -697,8 +697,8 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted final CountDownLatch latch = new CountDownLatch(1); final AtomicReference assertionRef = new AtomicReference<>(); - try (MasterService masterService = createClusterManagerService(true)) { - masterService.submitStateUpdateTask( + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask( "testBlockingCallInClusterStateTaskListenerFails", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -789,7 +789,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { ); try ( - MasterService masterService = new MasterService( + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -811,14 +811,14 @@ public void testLongClusterStateUpdateLogging() throws Exception { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { if (event.source().contains("test5")) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); } if (event.source().contains("test6")) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new OpenSearchException("simulated error during slow publication which should trigger logging"); @@ -826,17 +826,17 @@ public void testLongClusterStateUpdateLogging() throws Exception { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); final CountDownLatch latch = new CountDownLatch(6); final CountDownLatch processedFirstTask = new CountDownLatch(1); - masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + 
clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += randomLongBetween( 0L, - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() + ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis() ); return currentState; } @@ -854,10 +854,10 @@ public void onFailure(String source, Exception e) { }); processedFirstTask.await(); - masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); @@ -873,10 +873,10 @@ public void onFailure(String source, Exception e) { latch.countDown(); } }); - masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return ClusterState.builder(currentState).incrementVersion().build(); @@ -892,10 +892,10 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( + relativeTimeInMillis += ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY ).millis() + randomLongBetween(1, 1000000); return currentState; @@ -911,7 +911,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -927,7 +927,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -945,7 +945,7 @@ public void onFailure(String source, Exception e) { }); // Additional update task to make sure all previous logging made it to the loggerName // We don't check logging for this on since there is no guarantee that it will occur before our check - masterService.submitStateUpdateTask("test7", new 
ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -972,7 +972,7 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); try ( - MasterService masterService = new MasterService( + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -987,9 +987,9 @@ public void testAcking() throws InterruptedException { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference publisherRef = new AtomicReference<>(); - masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); - masterService.setClusterStateSupplier(() -> initialClusterState); - masterService.start(); + clusterManagerService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); + clusterManagerService.setClusterStateSupplier(() -> initialClusterState); + clusterManagerService.start(); // check that we don't time out before even committing the cluster state { @@ -1001,7 +1001,7 @@ public void testAcking() throws InterruptedException { ) ); - masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1056,7 +1056,7 @@ public void onAckTimeout() { ackListener.onNodeAck(node3, null); }); - masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1114,8 +1114,8 @@ public void testDeprecatedMasterServiceUpdateTaskThreadName() { /** * Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer) */ - public static ClusterState discoveryState(MasterService masterService) { - return masterService.state(); + public static ClusterState discoveryState(ClusterManagerService clusterManagerService) { + return clusterManagerService.state(); } } diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index d1e3f406b4933..5588e9c1ceba8 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -37,7 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.service.ClusterApplier; -import org.opensearch.cluster.service.MasterService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import 
org.opensearch.common.settings.ClusterSettings; @@ -70,7 +70,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private TransportService transportService; private NamedWriteableRegistry namedWriteableRegistry; - private MasterService masterService; + private ClusterManagerService clusterManagerService; private ClusterApplier clusterApplier; private ThreadPool threadPool; private ClusterSettings clusterSettings; @@ -93,7 +93,7 @@ public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - masterService = mock(MasterService.class); + clusterManagerService = mock(ClusterManagerService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -112,7 +112,7 @@ private DiscoveryModule newModule(Settings settings, List plugi transportService, namedWriteableRegistry, null, - masterService, + clusterManagerService, clusterApplier, clusterSettings, plugins, diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 9a7f0e2005555..1369309218519 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -144,9 +144,9 @@ import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.cluster.service.FakeThreadPoolMasterService; -import org.opensearch.cluster.service.MasterService; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.NamedWriteableRegistry; @@ -1646,7 +1646,7 @@ private final class TestClusterNode { private final DiscoveryNode node; - private final MasterService masterService; + private final ClusterManagerService clusterManagerService; private final AllocationService allocationService; @@ -1666,13 +1666,18 @@ private final class TestClusterNode { this.node = node; final Environment environment = createEnvironment(node.getName()); threadPool = deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)); - masterService = new FakeThreadPoolMasterService(node.getName(), "test", threadPool, deterministicTaskQueue::scheduleNow); + clusterManagerService = new FakeThreadPoolMasterService( + node.getName(), + "test", + threadPool, + deterministicTaskQueue::scheduleNow + ); final Settings settings = environment.settings(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = new ClusterService( settings, clusterSettings, - masterService, + clusterManagerService, new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { @@ -2210,7 +2215,7 @@ public void start(ClusterState initialState) { 
transportService, namedWriteableRegistry, allocationService, - masterService, + clusterManagerService, () -> persistedState, hostsResolver -> nodes.values() .stream() @@ -2224,7 +2229,7 @@ public void start(ClusterState initialState) { ElectionStrategy.DEFAULT_INSTANCE, () -> new StatusInfo(HEALTHY, "healthy-info") ); - masterService.setClusterStatePublisher(coordinator); + clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); clusterService.getClusterApplierService().setNodeConnectionsService(nodeConnectionsService); nodeConnectionsService.start(); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index d6ec70238f70d..9e58e3dcfe0e8 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -557,7 +557,7 @@ void stabilise(long stabilisationDurationMillis) { final ClusterNode leader = getAnyLeader(); final long leaderTerm = leader.coordinator.getCurrentTerm(); - final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount(); + final int pendingTaskCount = leader.clusterManagerService.getFakeMasterServicePendingTaskCount(); runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); final Matcher isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); @@ -1026,7 +1026,7 @@ class ClusterNode { private final DiscoveryNode localNode; final MockPersistedState persistedState; final Settings nodeSettings; - private AckedFakeThreadPoolMasterService masterService; + private AckedFakeThreadPoolMasterService clusterManagerService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; TransportService transportService; @@ -1106,7 +1106,7 @@ protected Optional getDisruptableMockTransport(Transpo null, emptySet() ); - masterService = new AckedFakeThreadPoolMasterService( + clusterManagerService = new AckedFakeThreadPoolMasterService( localNode.getId(), "test", threadPool, @@ -1120,7 +1120,7 @@ protected Optional getDisruptableMockTransport(Transpo deterministicTaskQueue, threadPool ); - clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); + clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); clusterService.setNodeConnectionsService( new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) ); @@ -1135,7 +1135,7 @@ protected Optional getDisruptableMockTransport(Transpo transportService, writableRegistry(), allocationService, - masterService, + clusterManagerService, this::getPersistedState, Cluster.this::provideSeedHosts, clusterApplierService, @@ -1145,7 +1145,7 @@ protected Optional getDisruptableMockTransport(Transpo getElectionStrategy(), nodeHealthService ); - masterService.setClusterStatePublisher(coordinator); + clusterManagerService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( settings, allocationService, @@ -1334,11 +1334,11 @@ AckCollector submitUpdateTask( onNode(() -> { logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source); final long submittedTerm = coordinator.getCurrentTerm(); - 
masterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { assertThat(currentState.term(), greaterThanOrEqualTo(submittedTerm)); - masterService.nextAckCollector = ackCollector; + clusterManagerService.nextAckCollector = ackCollector; return clusterStateUpdate.apply(currentState); } diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java index c532a0cc36472..7826a3aef9801 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java @@ -55,7 +55,7 @@ import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomInt; -public class FakeThreadPoolMasterService extends MasterService { +public class FakeThreadPoolMasterService extends ClusterManagerService { private static final Logger logger = LogManager.getLogger(FakeThreadPoolMasterService.class); private final String name; diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index 55ae70df9892c..8f4f510da5ec3 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.cluster.service.MasterService; import org.opensearch.common.settings.ClusterSettings; @@ -61,28 +62,40 @@ public class ClusterServiceUtils { - public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { - MasterService masterService = new MasterService( + public static ClusterManagerService createClusterManagerService(ThreadPool threadPool, ClusterState initialClusterState) { + ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_cluster_manager_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); - return masterService; + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); + return clusterManagerService; } - public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { + public static ClusterManagerService createClusterManagerService(ThreadPool threadPool, DiscoveryNode localNode) { ClusterState initialClusterState = 
ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); - return createMasterService(threadPool, initialClusterState); + return createClusterManagerService(threadPool, initialClusterState); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #createClusterManagerService(ThreadPool, ClusterState)} */ + @Deprecated + public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { + return createClusterManagerService(threadPool, initialClusterState); + } + + /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #createClusterManagerService(ThreadPool, DiscoveryNode)} */ + @Deprecated + public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { + return createClusterManagerService(threadPool, localNode); } public static void setState(ClusterApplierService executor, ClusterState clusterState) { @@ -114,7 +127,7 @@ public void onFailure(String source, Exception e) { } } - public static void setState(MasterService executor, ClusterState clusterState) { + public static void setState(ClusterManagerService executor, ClusterState clusterState) { CountDownLatch latch = new CountDownLatch(1); executor.submitStateUpdateTask("test setting state", new ClusterStateUpdateTask() { @Override @@ -164,8 +177,9 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); clusterService.getClusterApplierService().setInitialState(initialClusterState); - clusterService.getMasterService().setClusterStatePublisher(createClusterStatePublisher(clusterService.getClusterApplierService())); - clusterService.getMasterService().setClusterStateSupplier(clusterService.getClusterApplierService()::state); + clusterService.getClusterManagerService() + .setClusterStatePublisher(createClusterStatePublisher(clusterService.getClusterApplierService())); + clusterService.getClusterManagerService().setClusterStateSupplier(clusterService.getClusterApplierService()::state); clusterService.start(); return clusterService; } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java index 19aaba154109d..0a626f1473826 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java @@ -66,7 +66,7 @@ public void startDisrupting() { boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1)); assert success : "startDisrupting called without waiting on stopDisrupting to complete"; final CountDownLatch started = new CountDownLatch(1); - clusterService.getMasterService().submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask() { + clusterService.getClusterManagerService().submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask() { @Override public Priority priority() { return Priority.IMMEDIATE; diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java 
index d764c23404a91..fb40298bb98ef 100644
--- a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java
+++ b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java
@@ -67,7 +67,7 @@ public void startDisrupting() {
     }

     private void submitTask(ClusterService clusterService) {
-        clusterService.getMasterService().submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(priority) {
+        clusterService.getClusterManagerService().submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(priority) {
             @Override
             public ClusterState execute(ClusterState currentState) {
                 if (active.get()) {
diff --git a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java
index b4cdb0b33391a..dabeeb6742ec4 100644
--- a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java
+++ b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java
@@ -84,21 +84,21 @@ public void testFakeClusterManagerService() {
         doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any());
         when(mockThreadPool.generic()).thenReturn(executorService);

-        FakeThreadPoolMasterService masterService = new FakeThreadPoolMasterService(
+        FakeThreadPoolMasterService clusterManagerService = new FakeThreadPoolMasterService(
             "test_node",
             "test",
             mockThreadPool,
             runnableTasks::add
         );
-        masterService.setClusterStateSupplier(lastClusterStateRef::get);
-        masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+        clusterManagerService.setClusterStateSupplier(lastClusterStateRef::get);
+        clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> {
             lastClusterStateRef.set(event.state());
             publishingCallback.set(publishListener);
         });
-        masterService.start();
+        clusterManagerService.start();

         AtomicBoolean firstTaskCompleted = new AtomicBoolean();
-        masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
+        clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
             @Override
             public ClusterState execute(ClusterState currentState) {
                 return ClusterState.builder(currentState)
@@ -138,7 +138,7 @@ public void onFailure(String source, Exception e) {
         assertThat(runnableTasks.size(), equalTo(0));

         AtomicBoolean secondTaskCompleted = new AtomicBoolean();
-        masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
+        clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
             @Override
             public ClusterState execute(ClusterState currentState) {
                 return ClusterState.builder(currentState)

From bea5d1a9f04f997f0c591d62cab496e3fb8449b7 Mon Sep 17 00:00:00 2001
From: Tianli Feng
Date: Sun, 31 Jul 2022 20:48:15 -0700
Subject: [PATCH 046/104] Rename classes with name 'MasterService' to 'ClusterManagerService' in directory 'test/framework' (#4051)

To support inclusive language, the master terminology is going to be replaced by cluster manager in the code base. After the class MasterService has been deprecated and class ClusterManagerService has been created in #4022 / commit 740f75d, the classes in `test/framework` directory with name 'MasterService' can be deprecated and renamed.
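To make the deprecate-and-rename pattern concrete, the shim that comes back under the old name would look roughly like the minimal sketch below. This is hypothetical, not the actual patch content: the constructor signature is inferred from the call sites earlier in this series (e.g. `new FakeThreadPoolMasterService(node.getName(), "test", threadPool, deterministicTaskQueue::scheduleNow)`), and the javadoc wording mirrors the deprecation notes used in the other patches.

package org.opensearch.cluster.service;

import java.util.function.Consumer;

import org.opensearch.threadpool.ThreadPool;

/**
 * Backwards-compatibility shim kept under the old name; delegates everything
 * to the renamed class. (Sketch only; signature inferred from call sites.)
 *
 * @deprecated As of 2.2, because supporting inclusive language, replaced by
 *             {@link FakeThreadPoolClusterManagerService}
 */
@Deprecated
public class FakeThreadPoolMasterService extends FakeThreadPoolClusterManagerService {
    public FakeThreadPoolMasterService(
        String nodeName,
        String serviceName,
        ThreadPool threadPool,
        Consumer<Runnable> onTaskAvailableToRun // parameter name is an assumption
    ) {
        super(nodeName, serviceName, threadPool, onTaskAvailableToRun);
    }
}

With a shim like this, existing tests that still construct FakeThreadPoolMasterService keep compiling and behaving as before, only emitting a deprecation warning.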
- Rename the following classes in the `test/framework` directory: FakeThreadPoolMasterService -> FakeThreadPoolClusterManagerService BlockMasterServiceOnMaster -> BlockClusterManagerServiceOnClusterManager BusyMasterServiceDisruption -> BusyClusterManagerServiceDisruption In the following commit, I will add back the above 3 classes under their old names and deprecate them, to keep backwards compatibility. Signed-off-by: Tianli Feng --- .../discovery/ClusterManagerDisruptionIT.java | 4 ++-- .../snapshots/DedicatedClusterSnapshotRestoreIT.java | 4 ++-- .../InternalClusterInfoServiceSchedulingTests.java | 4 ++-- .../opensearch/cluster/coordination/NodeJoinTests.java | 4 ++-- .../opensearch/snapshots/SnapshotResiliencyTests.java | 4 ++-- .../coordination/AbstractCoordinatorTestCase.java | 10 +++++----- ...e.java => FakeThreadPoolClusterManagerService.java} | 8 ++++---- ...=> BlockClusterManagerServiceOnClusterManager.java} | 4 ++-- ...n.java => BusyClusterManagerServiceDisruption.java} | 4 ++-- ...a => FakeThreadPoolClusterManagerServiceTests.java} | 4 ++-- 10 files changed, 25 insertions(+), 25 deletions(-) rename test/framework/src/main/java/org/opensearch/cluster/service/{FakeThreadPoolMasterService.java => FakeThreadPoolClusterManagerService.java} (96%) rename test/framework/src/main/java/org/opensearch/test/disruption/{BlockMasterServiceOnMaster.java => BlockClusterManagerServiceOnClusterManager.java} (96%) rename test/framework/src/main/java/org/opensearch/test/disruption/{BusyMasterServiceDisruption.java => BusyClusterManagerServiceDisruption.java} (95%) rename test/framework/src/test/java/org/opensearch/cluster/service/{FakeThreadPoolMasterServiceTests.java => FakeThreadPoolClusterManagerServiceTests.java} (97%) diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index f548cfdb0eda2..2803fa262a216 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -43,7 +43,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.disruption.BlockMasterServiceOnMaster; +import org.opensearch.test.disruption.BlockClusterManagerServiceOnClusterManager; import org.opensearch.test.disruption.IntermittentLongGCDisruption; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -316,7 +316,7 @@ public void testMappingTimeout() throws Exception { .setTransientSettings(Settings.builder().put("indices.mapping.dynamic_timeout", "1ms")) ); - ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random()); + ServiceDisruptionScheme disruption = new BlockClusterManagerServiceOnClusterManager(random()); setDisruptionScheme(disruption); disruption.startDisrupting(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 7bb63fad7ad24..ce76e955fcc5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -94,7 +94,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.TestCustomMetadata; -import org.opensearch.test.disruption.BusyMasterServiceDisruption; +import org.opensearch.test.disruption.BusyClusterManagerServiceDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.transport.MockTransportService; @@ -1157,7 +1157,7 @@ public void testDataNodeRestartWithBusyClusterManagerDuringSnapshot() throws Exc final String dataNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - ServiceDisruptionScheme disruption = new BusyMasterServiceDisruption(random(), Priority.HIGH); + ServiceDisruptionScheme disruption = new BusyClusterManagerServiceDisruption(random(), Priority.HIGH); setDisruptionScheme(disruption); client(internalCluster().getClusterManagerName()).admin() .cluster() diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index bb0d07f2c6032..70fdb91ee0632 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -48,7 +48,7 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -89,7 +89,7 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { } }; - final ClusterManagerService clusterManagerService = new FakeThreadPoolMasterService( + final ClusterManagerService clusterManagerService = new FakeThreadPoolClusterManagerService( "test", "clusterManagerService", threadPool, diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 65a0867ccd26b..c77baba5fe167 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -43,7 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.MasterServiceTests; import org.opensearch.common.Randomness; @@ -144,7 +144,7 @@ private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterStat random() ); final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool(); - FakeThreadPoolMasterService fakeClusterManagerService = new FakeThreadPoolMasterService( + FakeThreadPoolClusterManagerService fakeClusterManagerService = new 
FakeThreadPoolClusterManagerService( "test_node", "test", fakeThreadPool, diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 1369309218519..39f86942f2c4b 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -146,7 +146,7 @@ import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.NamedWriteableRegistry; @@ -1666,7 +1666,7 @@ private final class TestClusterNode { this.node = node; final Environment environment = createEnvironment(node.getName()); threadPool = deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)); - clusterManagerService = new FakeThreadPoolMasterService( + clusterManagerService = new FakeThreadPoolClusterManagerService( node.getName(), "test", threadPool, diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 9e58e3dcfe0e8..b179d596f32f1 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -54,7 +54,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.FakeThreadPoolMasterService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; import org.opensearch.common.UUIDs; @@ -1026,7 +1026,7 @@ class ClusterNode { private final DiscoveryNode localNode; final MockPersistedState persistedState; final Settings nodeSettings; - private AckedFakeThreadPoolMasterService clusterManagerService; + private AckedFakeThreadPoolClusterManagerService clusterManagerService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; TransportService transportService; @@ -1106,7 +1106,7 @@ protected Optional getDisruptableMockTransport(Transpo null, emptySet() ); - clusterManagerService = new AckedFakeThreadPoolMasterService( + clusterManagerService = new AckedFakeThreadPoolClusterManagerService( localNode.getId(), "test", threadPool, @@ -1513,11 +1513,11 @@ int getSuccessfulAckIndex(ClusterNode clusterNode) { } } - static class AckedFakeThreadPoolMasterService extends FakeThreadPoolMasterService { + static class AckedFakeThreadPoolClusterManagerService extends FakeThreadPoolClusterManagerService { AckCollector nextAckCollector = new AckCollector(); - AckedFakeThreadPoolMasterService( + AckedFakeThreadPoolClusterManagerService( String nodeName, String serviceName, ThreadPool threadPool, diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java 
b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java similarity index 96% rename from test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java rename to test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 7826a3aef9801..46893fe08bd76 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -55,8 +55,8 @@ import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomInt; -public class FakeThreadPoolMasterService extends ClusterManagerService { - private static final Logger logger = LogManager.getLogger(FakeThreadPoolMasterService.class); +public class FakeThreadPoolClusterManagerService extends ClusterManagerService { + private static final Logger logger = LogManager.getLogger(FakeThreadPoolClusterManagerService.class); private final String name; private final List pendingTasks = new ArrayList<>(); @@ -65,7 +65,7 @@ public class FakeThreadPoolMasterService extends ClusterManagerService { private boolean taskInProgress = false; private boolean waitForPublish = false; - public FakeThreadPoolMasterService( + public FakeThreadPoolClusterManagerService( String nodeName, String serviceName, ThreadPool threadPool, @@ -137,7 +137,7 @@ public void run() { if (waitForPublish == false) { taskInProgress = false; } - FakeThreadPoolMasterService.this.scheduleNextTaskIfNecessary(); + FakeThreadPoolClusterManagerService.this.scheduleNextTaskIfNecessary(); } }); } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java similarity index 96% rename from test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java rename to test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java index 0a626f1473826..3d166b1590a5b 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BlockClusterManagerServiceOnClusterManager.java @@ -43,11 +43,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -public class BlockMasterServiceOnMaster extends SingleNodeDisruption { +public class BlockClusterManagerServiceOnClusterManager extends SingleNodeDisruption { AtomicReference disruptionLatch = new AtomicReference<>(); - public BlockMasterServiceOnMaster(Random random) { + public BlockClusterManagerServiceOnClusterManager(Random random) { super(random); } diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java similarity index 95% rename from test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java rename to test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java index fb40298bb98ef..5e538d09c5ebd 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java +++ 
b/test/framework/src/main/java/org/opensearch/test/disruption/BusyClusterManagerServiceDisruption.java @@ -41,11 +41,11 @@ import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; -public class BusyMasterServiceDisruption extends SingleNodeDisruption { +public class BusyClusterManagerServiceDisruption extends SingleNodeDisruption { private final AtomicBoolean active = new AtomicBoolean(); private final Priority priority; - public BusyMasterServiceDisruption(Random random, Priority priority) { + public BusyClusterManagerServiceDisruption(Random random, Priority priority) { super(random); this.priority = priority; } diff --git a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java similarity index 97% rename from test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java rename to test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java index dabeeb6742ec4..f09e62fa574e6 100644 --- a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolMasterServiceTests.java +++ b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java @@ -61,7 +61,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class FakeThreadPoolMasterServiceTests extends OpenSearchTestCase { +public class FakeThreadPoolClusterManagerServiceTests extends OpenSearchTestCase { public void testFakeClusterManagerService() { List runnableTasks = new ArrayList<>(); @@ -84,7 +84,7 @@ public void testFakeClusterManagerService() { doAnswer(invocationOnMock -> runnableTasks.add((Runnable) invocationOnMock.getArguments()[0])).when(executorService).execute(any()); when(mockThreadPool.generic()).thenReturn(executorService); - FakeThreadPoolMasterService clusterManagerService = new FakeThreadPoolMasterService( + FakeThreadPoolClusterManagerService clusterManagerService = new FakeThreadPoolClusterManagerService( "test_node", "test", mockThreadPool, From cebeb8c9c91366b88596e85b87df6ad5f55fd874 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 1 Aug 2022 09:27:17 -0700 Subject: [PATCH 047/104] Deprecate class FakeThreadPoolMasterService, BlockMasterServiceOnMaster and BusyMasterServiceDisruption (#4058) To support inclusive language, the master terminology is going to be replaced by cluster manager in the code base. In a previous PR https://github.com/opensearch-project/OpenSearch/pull/4051, 3 classes in the `test/framework` directory that contain `master` in their names were renamed: ``` FakeThreadPoolMasterService -> FakeThreadPoolClusterManagerService BlockMasterServiceOnMaster -> BlockClusterManagerServiceOnClusterManager BusyMasterServiceDisruption -> BusyClusterManagerServiceDisruption ``` This is a follow-up commit to add back the classes under their old names to keep backwards compatibility. The classes with the old names will be subclasses of the classes with the new names, so that a single implementation can serve callers of both names.
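The back-compat pattern described above is plain subclass delegation. The following is a minimal sketch of that pattern, not code from this patch; ClusterManagerTaskRunner and MasterTaskRunner are hypothetical names invented for illustration (the real pairs are the FakeThreadPool*Service, Block*ServiceOn* and Busy*ServiceDisruption classes restored below).

```
// Hypothetical names for illustration only; the deprecated alias extends the
// renamed class and adds no behavior, so one implementation serves both names.

/** New, inclusive-language class that carries the entire implementation. */
class ClusterManagerTaskRunner {
    private final String nodeName;

    ClusterManagerTaskRunner(String nodeName) {
        this.nodeName = nodeName;
    }

    void submitTask(String source) {
        System.out.println("node [" + nodeName + "] runs task [" + source + "]");
    }
}

/** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerTaskRunner} */
@Deprecated
class MasterTaskRunner extends ClusterManagerTaskRunner {
    MasterTaskRunner(String nodeName) {
        super(nodeName); // only forwards the constructor; no logic is duplicated
    }
}
```

Because the deprecated alias overrides nothing, existing callers keep compiling against the old name while every fix lands once in the superclass, and removing the alias later amounts to deleting a single file.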
Signed-off-by: Tianli Feng --- .../service/FakeThreadPoolMasterService.java | 28 +++++++++++++++++++ .../BlockMasterServiceOnMaster.java | 21 ++++++++++++++ .../BusyMasterServiceDisruption.java | 23 +++++++++++++++ 3 files changed, 72 insertions(+) create mode 100644 test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java create mode 100644 test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java create mode 100644 test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java new file mode 100644 index 0000000000000..0713432c00189 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.service; + +import org.opensearch.threadpool.ThreadPool; + +import java.util.function.Consumer; + +/** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link FakeThreadPoolClusterManagerService} + */ +@Deprecated +public class FakeThreadPoolMasterService extends FakeThreadPoolClusterManagerService { + public FakeThreadPoolMasterService( + String nodeName, + String serviceName, + ThreadPool threadPool, + Consumer onTaskAvailableToRun + ) { + super(nodeName, serviceName, threadPool, onTaskAvailableToRun); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java new file mode 100644 index 0000000000000..bbe99d838f296 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test.disruption; + +import java.util.Random; + +/** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link BlockClusterManagerServiceOnClusterManager} + */ +@Deprecated +public class BlockMasterServiceOnMaster extends BlockClusterManagerServiceOnClusterManager { + public BlockMasterServiceOnMaster(Random random) { + super(random); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java new file mode 100644 index 0000000000000..884997123e6a4 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test.disruption; + +import org.opensearch.common.Priority; + +import java.util.Random; + +/** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link BusyClusterManagerServiceDisruption} + */ +@Deprecated +public class BusyMasterServiceDisruption extends BusyClusterManagerServiceDisruption { + public BusyMasterServiceDisruption(Random random, Priority priority) { + super(random, priority); + } +} From bab2e59bb15a4e6e0a5c926f5ea2d7cc4318365b Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 1 Aug 2022 15:56:17 -0400 Subject: [PATCH 048/104] Update to Lucene 9.3.0 (#4043) Signed-off-by: Andriy Redko --- buildSrc/version.properties | 2 +- ...cene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-expressions-9.3.0.jar.sha1 | 1 + ...ene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-analysis-icu-9.3.0.jar.sha1 | 1 + ...nalysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.3.0.jar.sha1 | 1 + ...ne-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-analysis-nori-9.3.0.jar.sha1 | 1 + ...nalysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.3.0.jar.sha1 | 1 + ...analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 | 1 + ...analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.3.0.jar.sha1 | 1 + ...lysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.3.0.jar.sha1 | 1 + ...-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-analysis-common-9.3.0.jar.sha1 | 1 + ...-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-backward-codecs-9.3.0.jar.sha1 | 1 + .../lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-core-9.3.0.jar.sha1 | 1 + .../lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-grouping-9.3.0.jar.sha1 | 1 + ...cene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.3.0.jar.sha1 | 1 + .../lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-join-9.3.0.jar.sha1 | 1 + .../lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-memory-9.3.0.jar.sha1 | 1 + .../lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-misc-9.3.0.jar.sha1 | 1 + .../lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-queries-9.3.0.jar.sha1 | 1 + ...cene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.3.0.jar.sha1 | 1 + .../lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.3.0.jar.sha1 | 1 + ...e-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - .../licenses/lucene-spatial-extras-9.3.0.jar.sha1 | 1 + ...lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.3.0.jar.sha1 | 1 + .../lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 | 1 - server/licenses/lucene-suggest-9.3.0.jar.sha1 | 1 + .../fielddata/ordinals/GlobalOrdinalMapping.java | 2 +- .../index/fielddata/ordinals/MultiOrdinals.java | 4 ++-- .../search/aggregations/support/MissingValues.java | 4 ++-- .../search/internal/ContextIndexSearcher.java | 2 ++ .../search/query/TotalHitCountCollectorManager.java | 13 ++++++++++++- .../org/opensearch/search/MultiValueModeTests.java | 2 +- .../bucket/range/BinaryRangeAggregatorTests.java | 2 +- 
.../aggregations/support/IncludeExcludeTests.java | 2 +- .../aggregations/support/MissingValuesTests.java | 4 ++-- 54 files changed, 47 insertions(+), 34 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-core-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-join-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 
server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.3.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.3.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 5c2f5c86101d2..a988c1f97fa61 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.3.0-snapshot-b7231bb +lucene = 9.3.0 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index f527a3b68b6a3..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57ae445a0050ad492ef494b692b486dfe718b564 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2d216277b3a8e --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5583bcd3a24d3aae40b0a3152458021844ac09aa \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 51cbf51d90626..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b10e5bdae6df879b770060e0006bbc1c780c886d \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..df4ae8d72dd2b --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 @@ -0,0 +1 @@ +11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index ff57bbc283385..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0ddc3072fd16012dafc74928f87fdfd7669ea4a \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..675bf726d2a65 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 @@ -0,0 +1 @@ +87c1357612f2f483174d1a63ea8c6680a1696bac \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 13dd3c8a8bb24..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d761fa983d9c21099c433731d5519651737750c1 \ No newline at end of 
file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..8987f89c913df --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5d032dbeb3f4015741336a877dd4b0e62099246c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 5cba6f6700769..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43abbbe7c3c789ac448f898981acf54e487407a6 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..00d66c733c548 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 @@ -0,0 +1 @@ +fe6ac8772b545e0abd0c755cd4bd07caad58edb9 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 62097dc39ae20..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55df9442a35fe09d4f3f98bd2dda4d1a1dbfd996 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..0c521b5f5ef6a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 @@ -0,0 +1 @@ +288726e13b598c341e81aef8b5c9ce53f51889d0 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 1666e4aae21a6..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -102cbb1d619b96e1f3e524520658b9327a93aba1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..ba98dd7e06f71 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 @@ -0,0 +1 @@ +166d02f7f98f18c6607335030a404fcad8f57cd6 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 3a2d3cec6b952..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0d5dc4dfb74d698e51dc9b95268faf6dde4b0815 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 
b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..88ac9a13e8ce3 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 @@ -0,0 +1 @@ +3c0e4177aa87a4be2826a360f656f3559ea3f997 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 4cb292ad20c1f..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c6a9569777e4f01c90ed840e5a04234dfcaf42e \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2e260eb028f4c --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 @@ -0,0 +1 @@ +03496708a19a8a55a0dc4f61f8aa2febc6e8977c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 3878ed346c9ce..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7ef963f9f9f15fc5018c5fa68bae5cf65692ca9 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..1dda17ee92fdb --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 @@ -0,0 +1 @@ +95ea01ee0d1e543e18e3cf58d8a6a27a587a7239 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 9f9f6be85c57c..0000000000000 --- a/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da113c963d62f0c8786d7c294dbbb63d5d7953ab \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0.jar.sha1 b/server/licenses/lucene-core-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..fd870008c5bd4 --- /dev/null +++ b/server/licenses/lucene-core-9.3.0.jar.sha1 @@ -0,0 +1 @@ +a030180999bc3f1a65f23f53b38098ca9daeee79 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 92d0c41c6f4d2..0000000000000 --- a/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -54f65917cfa6c9c54cd0354ba333aa7e0f2980e5 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0.jar.sha1 b/server/licenses/lucene-grouping-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..6f63ca177d3c3 --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0.jar.sha1 @@ -0,0 +1 @@ +883071196e53ec93d2a53dcc8211ee30be6c00dc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index ecab2abeb6220..0000000000000 --- a/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d73ebe32147c9a12d321c0b1273d5e5d797b705f \ No newline at end of file diff --git 
a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..78264d8ee3713 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 @@ -0,0 +1 @@ +7e895c49b9991ea2ec08855c425b9eae44a08764 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 725fc883c272b..0000000000000 --- a/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -797c92ffe35af37ab1783906fb93ed95a145a701 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0.jar.sha1 b/server/licenses/lucene-join-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..5e641f5f01075 --- /dev/null +++ b/server/licenses/lucene-join-9.3.0.jar.sha1 @@ -0,0 +1 @@ +04baaae4ce4a35ae919150dd17cd1e63b0da9d24 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 312a65edb6e24..0000000000000 --- a/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5714d64c39021c65dece8ee979d9ea39a327bb87 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0.jar.sha1 b/server/licenses/lucene-memory-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..c8e86c7674ede --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0.jar.sha1 @@ -0,0 +1 @@ +1a2203b332edc1366b9789f5286296e109dbc8c4 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index b701384ab601d..0000000000000 --- a/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d401c55114367e574ed51e914661f0a97f91e88 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0.jar.sha1 b/server/licenses/lucene-misc-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..11a459a9f52ba --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0.jar.sha1 @@ -0,0 +1 @@ +61b502c9557247b6803a346c0bab20c9dc89d125 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index ec2f7508d35cc..0000000000000 --- a/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0f165ff86546565d32a508c82ca80ac2840bcf38 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0.jar.sha1 b/server/licenses/lucene-queries-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2b577bd33b46a --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0.jar.sha1 @@ -0,0 +1 @@ +d8fe3bce3c05015c5fdb78279f36b9f1a75b98d8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 40a125ccada21..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6fb5af1873628dc026e18b5438042143a9a9824 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..b106860bf9f3e --- 
/dev/null +++ b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 @@ -0,0 +1 @@ +78f259a66d48f77a2d2b96a0a858efa08eba72dc \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index b4784be40d072..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c48ab8982e6bf9429eded6a06d640db922eb2b69 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..82c2c6d85ca4c --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5ee318cf8e9a70c2c99e03e157465316a3d4a17a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 6f39582081758..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d757dc379fee639f54d0574443c5a6fd0b70613a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..8bbc5359487ff --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 @@ -0,0 +1 @@ +c9b226b49ae987a4226791f023562187583eb9ad \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index b5986970cb4da..0000000000000 --- a/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a4e6de9b40cd027233a3ed00774810c36457a6c \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..31132ef0ad6df --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 @@ -0,0 +1 @@ +201aa61856ae44fa494504591aed54fd9b75af16 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 deleted file mode 100644 index 682a0ee88868f..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e793761c4a4292de0d52f066787ab5f3133382cd \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0.jar.sha1 b/server/licenses/lucene-suggest-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..71a263aa163f8 --- /dev/null +++ b/server/licenses/lucene-suggest-9.3.0.jar.sha1 @@ -0,0 +1 @@ +fb5d7243ba67616edbda1ecf421c615dd595752d \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java index 884e0d66ffd8d..cc5415aacd276 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java @@ -113,7 +113,7 @@ public long cost() { } @Override - public long docValueCount() { + public int docValueCount() { return values.docValueCount(); } } diff --git 
a/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java index 6e3f83690a872..daccb5dfe9fca 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java @@ -231,8 +231,8 @@ public BytesRef lookupOrd(long ord) { } @Override - public long docValueCount() { - return currentEndOffset - currentOffset; + public int docValueCount() { + return Math.toIntExact(currentEndOffset - currentOffset); } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index 179e4f18a1ea1..449c961cb393a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -311,7 +311,7 @@ public boolean advanceExact(int doc) throws IOException { } @Override - public long docValueCount() { + public int docValueCount() { return values.docValueCount(); } @@ -346,7 +346,7 @@ public long getValueCount() { } @Override - public long docValueCount() { + public int docValueCount() { return values.docValueCount(); } diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index efd288fd1cf42..a7f509fd24c2b 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -260,6 +260,8 @@ protected void search(List leaves, Weight weight, Collector c private void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collector) throws IOException { cancellable.checkCancelled(); weight = wrapWeight(weight); + // See please https://github.com/apache/lucene/pull/964 + collector.setWeight(weight); final LeafCollector leafCollector; try { leafCollector = collector.getLeafCollector(ctx); diff --git a/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java b/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java index b8810c76bbc07..e89a4ba3bae59 100644 --- a/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java @@ -8,7 +8,10 @@ package org.opensearch.search.query; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectorManager; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; @@ -33,7 +36,15 @@ public class TotalHitCountCollectorManager private static final TotalHitCountCollector EMPTY_COLLECTOR = new TotalHitCountCollector() { @Override - public void collect(int doc) {} + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + return new LeafCollector() { + @Override + public void setScorer(Scorable scorer) throws IOException {} + + @Override + public void collect(int doc) throws IOException {} + }; + } @Override public ScoreMode scoreMode() { diff --git a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java 
b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java index 525621c02fd32..eeb743848b600 100644 --- a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java +++ b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java @@ -765,7 +765,7 @@ public long getValueCount() { } @Override - public long docValueCount() { + public int docValueCount() { return array[doc].length; } }; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java index ea4dc09e6a601..9307a70d57ca2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java @@ -84,7 +84,7 @@ public long getValueCount() { } @Override - public long docValueCount() { + public int docValueCount() { return ords.length; } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index 51f135ec0b56b..451bbf535eb3a 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -99,7 +99,7 @@ public long getValueCount() { } @Override - public long docValueCount() { + public int docValueCount() { return 1; } }; diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java index 0eca61d825a2d..3dcab9edb444a 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java @@ -157,7 +157,7 @@ public long nextOrd() { } @Override - public long docValueCount() { + public int docValueCount() { return ords[doc].length; } }; @@ -263,7 +263,7 @@ public BytesRef lookupOrd(long ord) throws IOException { } @Override - public long docValueCount() { + public int docValueCount() { throw new UnsupportedOperationException(); } From 88f5537b10a77cd1a7aa32990df54e581f1ea369 Mon Sep 17 00:00:00 2001 From: Vacha Shah Date: Mon, 1 Aug 2022 14:13:37 -0700 Subject: [PATCH 049/104] Upgrade dependencies (#4047) * Upgrading dependencies for hadoop and aws-java-sdk Signed-off-by: Vacha Shah * Fixing precommit Signed-off-by: Vacha Shah * Upgrading transitive dependencies Signed-off-by: Vacha Shah * Excluding transitive dependencies Signed-off-by: Vacha Shah --- buildSrc/build.gradle | 2 ++ buildSrc/version.properties | 2 +- .../licenses/aws-java-sdk-core-1.12.247.jar.sha1 | 1 - .../licenses/aws-java-sdk-core-1.12.270.jar.sha1 | 1 + .../licenses/aws-java-sdk-ec2-1.12.247.jar.sha1 | 1 - .../licenses/aws-java-sdk-ec2-1.12.270.jar.sha1 | 1 + plugins/repository-hdfs/build.gradle | 10 ++++------ .../licenses/hadoop-client-api-3.3.1.jar.sha1 | 1 - .../licenses/hadoop-client-api-3.3.3.jar.sha1 | 1 + .../licenses/hadoop-client-runtime-3.3.1.jar.sha1 | 1 - .../licenses/hadoop-client-runtime-3.3.3.jar.sha1 | 1 + .../licenses/hadoop-hdfs-3.3.1.jar.sha1 | 1 - .../licenses/hadoop-hdfs-3.3.3.jar.sha1 | 1 + .../licenses/aws-java-sdk-core-1.12.247.jar.sha1 | 1 - .../licenses/aws-java-sdk-core-1.12.270.jar.sha1 | 1 + 
.../licenses/aws-java-sdk-s3-1.12.247.jar.sha1 | 1 - .../licenses/aws-java-sdk-s3-1.12.270.jar.sha1 | 1 + .../licenses/aws-java-sdk-sts-1.12.247.jar.sha1 | 1 - .../licenses/aws-java-sdk-sts-1.12.270.jar.sha1 | 1 + .../licenses/jmespath-java-1.12.247.jar.sha1 | 1 - .../licenses/jmespath-java-1.12.270.jar.sha1 | 1 + .../indices/replication/SegmentReplicationIT.java | 2 +- test/fixtures/hdfs-fixture/build.gradle | 5 ++++- 23 files changed, 21 insertions(+), 18 deletions(-) delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.247.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.270.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.247.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.270.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-api-3.3.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-core-1.12.247.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-core-1.12.270.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.247.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.270.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.247.jar.sha1 create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.270.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jmespath-java-1.12.247.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jmespath-java-1.12.270.jar.sha1 diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6ae4512415345..3ef3c6f9faf49 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -112,6 +112,8 @@ dependencies { api 'commons-io:commons-io:2.7' api "net.java.dev.jna:jna:5.11.0" api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2' + api 'org.jdom:jdom2:2.0.6.1' + api 'org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.7.10' api 'de.thetaphi:forbiddenapis:3.3' api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' api 'org.apache.maven:maven-model:3.6.2' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a988c1f97fa61..fc751d8461e92 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -32,7 +32,7 @@ commonslogging = 1.2 commonscodec = 1.13 # plugin dependencies -aws = 1.12.247 +aws = 1.12.270 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.247.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.247.jar.sha1 deleted file mode 100644 index 5b3f4a3511769..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.247.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -70f59d940c965a899f69743ec36a8eb099f539ef \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.270.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.270.jar.sha1 new file mode 100644 index 
0000000000000..ce40f68b3e229 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.12.270.jar.sha1 @@ -0,0 +1 @@ +8f0cc2cc1b41c51e2117f5b1ce6530febf99d4ba \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.247.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.247.jar.sha1 deleted file mode 100644 index 03505417f3e26..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.247.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30120ff6617fb653d525856480d7ba99528d875d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.270.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.270.jar.sha1 new file mode 100644 index 0000000000000..ac00f6d4e8038 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.12.270.jar.sha1 @@ -0,0 +1 @@ +6d0ce44b33006e163c25f394f869e6b3a51aefc5 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index d1658279bfe48..6cd0d4283f33d 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -48,7 +48,7 @@ opensearchplugin { } versions << [ - 'hadoop3': '3.3.1' + 'hadoop3': '3.3.3' ] testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "hdfs" @@ -60,7 +60,9 @@ configurations { dependencies { api "org.apache.hadoop:hadoop-client-api:${versions.hadoop3}" runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop3}" - api "org.apache.hadoop:hadoop-hdfs:${versions.hadoop3}" + api("org.apache.hadoop:hadoop-hdfs:${versions.hadoop3}") { + exclude module: 'jetty-server' + } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.11.0' @@ -438,10 +440,6 @@ thirdPartyAudit { 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', 'org.apache.hadoop.shaded.org.apache.curator.shaded.com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'org.apache.hadoop.shaded.org.xbill.DNS.spi.DNSJavaNameServiceDescriptor', - 'org.apache.hadoop.shaded.org.xerial.snappy.pure.PureJavaSnappy', - 'org.apache.hadoop.shaded.org.xerial.snappy.pure.SnappyRawCompressor', - 'org.apache.hadoop.shaded.org.xerial.snappy.pure.SnappyRawDecompressor', - 'org.apache.hadoop.shaded.org.xerial.snappy.pure.UnsafeUtil', 'org.apache.avro.reflect.FieldAccessUnsafe', 'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeBooleanField', diff --git a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.1.jar.sha1 deleted file mode 100644 index dc2f20e310d30..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b9c9cdd9967495838fb521001699c4c9dddf183 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 new file mode 100644 index 0000000000000..8df133d0bd106 --- /dev/null +++ b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.3.jar.sha1 @@ -0,0 +1 @@ +d0593aed2d4df9bcee507550913d29d589ebd84a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.1.jar.sha1 deleted file mode 100644 index feb37ecc90255..0000000000000 --- 
a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3a55d882328ee87a1054f99d62ba987fa9029a4 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 new file mode 100644 index 0000000000000..f980eebc7a46c --- /dev/null +++ b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.3.jar.sha1 @@ -0,0 +1 @@ +52619ecfb0225d7ae67b15264521064824ac57ca \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.1.jar.sha1 deleted file mode 100644 index 66c98cf7ec291..0000000000000 --- a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5da7f270cb6564e099e0d2d424285a24fca62bd2 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 new file mode 100644 index 0000000000000..463b7415e4c4b --- /dev/null +++ b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.3.jar.sha1 @@ -0,0 +1 @@ +d4d199760c11d47f90e12fe3882e2b24c77e4eb5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.12.247.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.12.247.jar.sha1 deleted file mode 100644 index 5b3f4a3511769..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-core-1.12.247.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -70f59d940c965a899f69743ec36a8eb099f539ef \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.12.270.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.12.270.jar.sha1 new file mode 100644 index 0000000000000..ce40f68b3e229 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-core-1.12.270.jar.sha1 @@ -0,0 +1 @@ +8f0cc2cc1b41c51e2117f5b1ce6530febf99d4ba \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.247.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.247.jar.sha1 deleted file mode 100644 index 2d32399f871b4..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.247.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -648c59d979e2792b4aa8f444a4748abd62a65783 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.270.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.270.jar.sha1 new file mode 100644 index 0000000000000..73b9b4cd8d410 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.12.270.jar.sha1 @@ -0,0 +1 @@ +2901cdd72a7f0d940b2bd4e1bcdb606d5d33736f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.247.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.247.jar.sha1 deleted file mode 100644 index 31ce4a4e6a5cb..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.247.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e77a7409ccf7ef3c3d342897dd75590147d2ffe \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.270.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.270.jar.sha1 new file mode 100644 index 0000000000000..3b77226cdd5d2 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.12.270.jar.sha1 @@ -0,0 +1 @@ +aeffa3e8d9471377adf1108e21dab92f4f13edb3 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/jmespath-java-1.12.247.jar.sha1 b/plugins/repository-s3/licenses/jmespath-java-1.12.247.jar.sha1 deleted file mode 100644 index fd71f57f9a5fc..0000000000000 --- a/plugins/repository-s3/licenses/jmespath-java-1.12.247.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1f7acde495f815af705490f2a37b3758299a8e4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jmespath-java-1.12.270.jar.sha1 b/plugins/repository-s3/licenses/jmespath-java-1.12.270.jar.sha1 new file mode 100644 index 0000000000000..a50e3040575c3 --- /dev/null +++ b/plugins/repository-s3/licenses/jmespath-java-1.12.270.jar.sha1 @@ -0,0 +1 @@ +5bd3e1976e3b3b94c30e4868af9a5bfc4221e24a \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 2c91eadafbee8..a1cc0148dcdac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -248,7 +248,7 @@ private void assertSegmentStats(int numberOfReplicas) throws IOException { /** * Waits until the replica is caught up to the latest primary segments gen. - * @throws Exception + * @throws Exception if assertion fails */ private void waitForReplicaUpdate() throws Exception { // wait until the replica has the latest segment generation. diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 0795cecaa36cc..73aca2a6ca02b 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,9 @@ apply plugin: 'opensearch.java' group = 'hdfs' dependencies { - api "org.apache.hadoop:hadoop-minicluster:3.3.3" + api("org.apache.hadoop:hadoop-minicluster:3.3.3") { + exclude module: 'websocket-client' + } api "org.apache.commons:commons-compress:1.21" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" @@ -44,4 +46,5 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" + api "com.google.protobuf:protobuf-java:3.21.2" } From 954f0ecc8b51bb58e0b38b2b15a48fbd6ea5ec5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Aug 2022 14:45:13 -0700 Subject: [PATCH 050/104] Bump com.diffplug.spotless from 6.8.0 to 6.9.0 (#4065) Bumps com.diffplug.spotless from 6.8.0 to 6.9.0. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index ceb9ce395c3c7..e0bb961ce14c2 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.8.0" apply false + id "com.diffplug.spotless" version "6.9.0" apply false id "org.gradle.test-retry" version "1.4.0" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From 249e4fcface0355d28cb97e763a91e0ea92c7c5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Aug 2022 14:55:40 -0700 Subject: [PATCH 051/104] Bump protobuf-java from 3.21.2 to 3.21.4 in /plugins/repository-hdfs (#4019) * Bump protobuf-java from 3.21.2 to 3.21.4 in /plugins/repository-hdfs Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.21.2 to 3.21.4. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.21.2...v3.21.4) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/protobuf-java-3.21.2.jar.sha1 | 1 - plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.2.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6cd0d4283f33d..50784c809e657 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -69,7 +69,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.2' + api 'com.google.protobuf:protobuf-java:3.21.4' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.2.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.2.jar.sha1 deleted file mode 100644 index e4bdd2f80a407..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7755d218ee7c15541afb51f2d247ca951603e0b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 new file mode 100644 index 0000000000000..f232c9a449547 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.4.jar.sha1 @@ -0,0 +1 @@ +9947febd7a6d0695726c78f603a149b7b7c108e0 \ No newline at end of file From 707ca132db3c8cba0672483ee54deb3763e4ad77 Mon Sep 17 
00:00:00 2001 From: Daniel Widdis Date: Mon, 1 Aug 2022 16:01:30 -0700 Subject: [PATCH 052/104] Publish transport-netty4 module to central repository (#4054) Signed-off-by: Daniel Widdis --- modules/transport-netty4/build.gradle | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 89988fc3ed224..450eaed14fa46 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -38,6 +38,9 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' +// The transport-netty4 plugin is published to maven +apply plugin: 'opensearch.publish' + /* TODOs: * fix permissions such that only netty4 can open sockets etc? From b0080eb8dfe905f15a2df92bd3a349222e0df1b3 Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Mon, 1 Aug 2022 21:56:42 -0700 Subject: [PATCH 053/104] Replace master terminology in documentation (#4079) Signed-off-by: Xue Zhou --- DEVELOPER_GUIDE.md | 2 +- TESTING.md | 2 +- .../src/main/resources/rest-api-spec/test/README.md | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 70abfda767353..ce84d9658a808 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -460,7 +460,7 @@ Work to make sure that OpenSearch can scale in a distributed manner. Includes: -- Nodes (Master, Data, Compute, Ingest, Discovery, etc.) +- Nodes (Cluster Manager, Data, Compute, Ingest, Discovery, etc.) - Replication & Merge Policies (Document, Segment level) - Snapshot/Restore (repositories; S3, Azure, GCP, NFS) - Translog (e.g., OpenSearch, Kafka, Kinesis) diff --git a/TESTING.md b/TESTING.md index d6f246dbd6dcc..de88a2bfec6c7 100644 --- a/TESTING.md +++ b/TESTING.md @@ -395,7 +395,7 @@ The branch needs to be available on the remote that the BWC makes of the reposit Example: -Say you need to make a change to `master` and have a BWC layer in `5.x`. You will need to: . Create a branch called `index_req_change` off your remote `${remote}`. This will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. . Run the tests with `./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x`. +Say you need to make a change to `main` and have a BWC layer in `5.x`. You will need to: . Create a branch called `index_req_change` off your remote `${remote}`. This will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. . Run the tests with `./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x`. 
### Skip fetching latest diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.md b/rest-api-spec/src/main/resources/rest-api-spec/test/README.md index da97910df174f..6975ed2d2f18b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.md +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.md @@ -115,7 +115,7 @@ Indicates the runner can parse `node_selector` under the `do` operator and use i Allows you to use a stashed value in any key of an object during a `match` assertion - - set: {nodes.$master.http.publish_address: host} + - set: {nodes.$cluster_manager.http.publish_address: host} - match: $body: { @@ -319,13 +319,13 @@ Stashed values can be used in property names, eg: - do: cluster.state: {} - - set: { master_node: master } + - set: { cluster_manager_node: cluster_manager } - do: nodes.info: metric: [ transport ] - - is_true: nodes.$master.transport.profiles + - is_true: nodes.$cluster_manager.transport.profiles Note that not only expected values can be retrieved from the stashed values (as in the example above), but the same goes for actual values: From 5eac54d4ade73f6d0ed80b0f2408a104a98e3232 Mon Sep 17 00:00:00 2001 From: Ketan Verma Date: Tue, 2 Aug 2022 13:25:40 +0530 Subject: [PATCH 054/104] Support task resource tracking in OpenSearch (#3982) * Support task resource tracking in OpenSearch * Reopens changes from #2639 (reverted in #3046) to add a framework for task resource tracking. Currently, SearchTask and SearchShardTask support resource tracking but it can be extended to any other task. * Fixed a race-condition when Task is unregistered before its threads are stopped * Improved error handling and simplified task resource tracking completion listener * Avoid registering listeners on already completed tasks Signed-off-by: Ketan Verma --- .../admin/cluster/node/tasks/TasksIT.java | 6 + .../tasks/list/TransportListTasksAction.java | 13 +- .../action/search/SearchShardTask.java | 5 + .../opensearch/action/search/SearchTask.java | 5 + .../action/support/TransportAction.java | 78 ++- .../org/opensearch/cluster/ClusterModule.java | 2 + .../common/settings/ClusterSettings.java | 4 +- .../util/concurrent/OpenSearchExecutors.java | 18 +- .../common/util/concurrent/ThreadContext.java | 16 +- .../main/java/org/opensearch/node/Node.java | 14 +- .../main/java/org/opensearch/tasks/Task.java | 103 ++- .../org/opensearch/tasks/TaskManager.java | 51 +- .../tasks/TaskResourceTrackingService.java | 248 +++++++ .../opensearch/tasks/ThreadResourceInfo.java | 10 +- .../threadpool/ResizableExecutorBuilder.java | 25 +- .../RunnableTaskExecutionListener.java | 33 + .../threadpool/TaskAwareRunnable.java | 90 +++ .../org/opensearch/threadpool/ThreadPool.java | 16 +- .../transport/RequestHandlerRegistry.java | 4 + .../tasks/RecordingTaskManagerListener.java | 3 + .../node/tasks/ResourceAwareTasksTests.java | 653 ++++++++++++++++++ .../node/tasks/TaskManagerTestCase.java | 17 +- .../bulk/TransportBulkActionIngestTests.java | 3 +- .../util/concurrent/ThreadContextTests.java | 10 + .../snapshots/SnapshotResiliencyTests.java | 3 + .../opensearch/tasks/TaskManagerTests.java | 6 +- .../TaskResourceTrackingServiceTests.java | 97 +++ .../test/tasks/MockTaskManager.java | 16 + .../test/tasks/MockTaskManagerListener.java | 3 + .../opensearch/threadpool/TestThreadPool.java | 20 +- 30 files changed, 1513 insertions(+), 59 deletions(-) create mode 100644 server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java create mode 100644 
server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java create mode 100644 server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java create mode 100644 server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index d37285211f774..61059f83f0e77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -470,6 +470,9 @@ public void onTaskUnregistered(Task task) {} @Override public void waitForTaskCompletion(Task task) {} + + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} }); } // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener @@ -651,6 +654,9 @@ public void waitForTaskCompletion(Task task) { waitForWaitingToStart.countDown(); } + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} + @Override public void onTaskRegistered(Task task) {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index 796ea023edd40..aede3fe5b1cc0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -42,6 +42,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -65,8 +66,15 @@ public static long waitForCompletionTimeout(TimeValue timeout) { private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); + private final TaskResourceTrackingService taskResourceTrackingService; + @Inject - public TransportListTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { + public TransportListTasksAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + TaskResourceTrackingService taskResourceTrackingService + ) { super( ListTasksAction.NAME, clusterService, @@ -77,6 +85,7 @@ public TransportListTasksAction(ClusterService clusterService, TransportService TaskInfo::new, ThreadPool.Names.MANAGEMENT ); + this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -106,6 +115,8 @@ protected void processTasks(ListTasksRequest request, Consumer operation) } taskManager.waitForTaskCompletion(task, timeoutNanos); }); + } else { + operation = operation.andThen(taskResourceTrackingService::refreshResourceStats); } super.processTasks(request, operation); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index e5f25595b9ec8..57831896db714 100644 --- 
a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -51,6 +51,11 @@ public SearchShardTask(long id, String type, String action, String description, super(id, type, action, description, parentTaskId, headers); } + @Override + public boolean supportsResourceTracking() { + return true; + } + @Override public boolean shouldCancelChildrenOnCancellation() { return false; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index 89f23bb9bdaeb..987485fe44c65 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -80,6 +80,11 @@ public final String getDescription() { return descriptionSupplier.get(); } + @Override + public boolean supportsResourceTracking() { + return true; + } + /** * Attach a {@link SearchProgressListener} to this task. */ diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index a60648e50ff31..71ae187b48c4e 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -40,6 +40,7 @@ import org.opensearch.action.ActionResponse; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskId; @@ -93,31 +94,39 @@ public final Task execute(Request request, ActionListener listener) { */ final Releasable unregisterChildNode = registerChildNode(request.getParentTask()); final Task task; + try { task = taskManager.register("transport", actionName, request); } catch (TaskCancelledException e) { unregisterChildNode.close(); throw e; } - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(response); + + ThreadContext.StoredContext storedContext = taskManager.taskExecutionStarted(task); + try { + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(response); + } } - } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(e); + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(e); + } } - } - }); + }); + } finally { + storedContext.close(); + } + return task; } @@ -134,25 +143,30 @@ public final Task execute(Request request, TaskListener listener) { unregisterChildNode.close(); throw e; } - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onResponse(task, response); + ThreadContext.StoredContext storedContext = 
taskManager.taskExecutionStarted(task); + try { + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onResponse(task, response); + } } - } - @Override - public void onFailure(Exception e) { - try { - Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); - } finally { - listener.onFailure(task, e); + @Override + public void onFailure(Exception e) { + try { + Releasables.close(unregisterChildNode, () -> taskManager.unregister(task)); + } finally { + listener.onFailure(task, e); + } } - } - }); + }); + } finally { + storedContext.close(); + } return task; } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 900dceb8564c9..f8ba520e465e2 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -94,6 +94,7 @@ import org.opensearch.script.ScriptMetadata; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; import java.util.ArrayList; @@ -396,6 +397,7 @@ protected void configure() { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); + bind(TaskResourceTrackingService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index bee3428188026..fe322992cd3e5 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -41,6 +41,7 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -575,7 +576,8 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.THROUGHPUT_DEGRADATION_LIMITS, ShardIndexingPressureMemoryManager.SUCCESSFUL_REQUEST_ELAPSED_TIMEOUT, ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, - IndexingPressure.MAX_INDEXING_BYTES + IndexingPressure.MAX_INDEXING_BYTES, + TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index 14f9486b4baf0..ec1024bbe5f30 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -40,6 +40,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.node.Node; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.TaskAwareRunnable; import java.util.List; import java.util.Optional; @@ -55,6 +57,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; /** @@ -182,13 +185,24 @@ public static OpenSearchThreadPoolExecutor newResizable( int size, int queueCapacity, ThreadFactory threadFactory, - ThreadContext contextHolder + ThreadContext contextHolder, + AtomicReference runnableTaskListener ) { if (queueCapacity <= 0) { throw new IllegalArgumentException("queue capacity for [" + name + "] executor must be positive, got: " + queueCapacity); } + Function runnableWrapper; + if (runnableTaskListener != null) { + runnableWrapper = (runnable) -> { + TaskAwareRunnable taskAwareRunnable = new TaskAwareRunnable(contextHolder, runnable, runnableTaskListener); + return new TimedRunnable(taskAwareRunnable); + }; + } else { + runnableWrapper = TimedRunnable::new; + } + return new QueueResizableOpenSearchThreadPoolExecutor( name, size, @@ -196,7 +210,7 @@ public static OpenSearchThreadPoolExecutor newResizable( 0, TimeUnit.MILLISECONDS, new ResizableBlockingQueue<>(ConcurrentCollections.newBlockingQueue(), queueCapacity), - TimedRunnable::new, + runnableWrapper, threadFactory, new OpenSearchAbortPolicy(), contextHolder diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 5e2381c949c00..5b9a77c75dddb 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -66,6 +66,7 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -135,16 +136,23 @@ public StoredContext stashContext() { * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. * Otherwise when context is stash, it should be empty. 
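* A rough illustration of the intended behavior after this change (hypothetical values, not part of the patch):
*
*   threadContext.putTransient(TASK_ID, 42L);
*   try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
*       Long taskId = threadContext.getTransient(TASK_ID); // still 42L; only TASK_ID and X-Opaque-ID survive the stash
*   }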
*/ + + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT; + if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { - ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders( + threadContextStruct = threadContextStruct.putHeaders( MapBuilder.newMapBuilder() .put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)) .immutableMap() ); - threadLocal.set(threadContextStruct); - } else { - threadLocal.set(DEFAULT_CONTEXT); } + + if (context.transientHeaders.containsKey(TASK_ID)) { + threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); + } + + threadLocal.set(threadContextStruct); + return () -> { // If the node and thus the threadLocal get closed while this task // is still executing, we don't want this runnable to fail with an diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index bc5f17759d498..d3f0912cab638 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -39,10 +39,12 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; -import org.opensearch.index.store.RemoteDirectoryFactory; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.Assertions; import org.opensearch.Build; @@ -219,6 +221,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.UnaryOperator; import java.util.stream.Collectors; @@ -338,6 +341,7 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; final NamedWriteableRegistry namedWriteableRegistry; + private final AtomicReference runnableTaskListener; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -447,7 +451,8 @@ protected Node( final List> executorBuilders = pluginsService.getExecutorBuilders(settings); - final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); + runnableTaskListener = new AtomicReference<>(); + final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -1095,6 +1100,11 @@ public Node start() throws NodeValidationException { TransportService transportService = injector.getInstance(TransportService.class); transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); + + 
TaskResourceTrackingService taskResourceTrackingService = injector.getInstance(TaskResourceTrackingService.class); + transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); + runnableTaskListener.set(taskResourceTrackingService); + transportService.start(); assert localNodeFactory.getNode() != null; assert transportService.getLocalNode().equals(localNodeFactory.getNode()) diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index 522ecac5ef698..d052d374ef1f0 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -34,7 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionResponse; +import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.xcontent.ToXContent; @@ -47,6 +49,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; /** * Current task information @@ -78,6 +81,15 @@ public class Task { private final Map> resourceStats; + private final List> resourceTrackingCompletionListeners; + + /** + * Keeps track of the number of active resource tracking threads for this task. It is initialized to 1 to track + * the task's own/self thread. When this value becomes 0, all threads have been marked inactive and the resource + * tracking can be stopped for this task. + */ + private final AtomicInteger numActiveResourceTrackingThreads = new AtomicInteger(1); + /** * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). 
*/ @@ -89,7 +101,18 @@ public class Task { private final long startTimeNanos; public Task(long id, String type, String action, String description, TaskId parentTask, Map headers) { - this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers, new ConcurrentHashMap<>()); + this( + id, + type, + action, + description, + parentTask, + System.currentTimeMillis(), + System.nanoTime(), + headers, + new ConcurrentHashMap<>(), + new ArrayList<>() + ); } public Task( @@ -101,7 +124,8 @@ public Task( long startTime, long startTimeNanos, Map headers, - ConcurrentHashMap> resourceStats + ConcurrentHashMap> resourceStats, + List> resourceTrackingCompletionListeners ) { this.id = id; this.type = type; @@ -112,6 +136,7 @@ public Task( this.startTimeNanos = startTimeNanos; this.headers = headers; this.resourceStats = resourceStats; + this.resourceTrackingCompletionListeners = resourceTrackingCompletionListeners; } /** @@ -291,7 +316,8 @@ public void startThreadResourceTracking(long threadId, ResourceStatsType statsTy ); } } - threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics)); + threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); + incrementResourceTrackingThreads(); } /** @@ -331,6 +357,7 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { threadResourceInfo.setActive(false); threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics); + decrementResourceTrackingThreads(); return; } } @@ -338,6 +365,17 @@ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsTyp throw new IllegalStateException("cannot update final values if active thread resource entry is not present"); } + /** + * Individual tasks can override this if they want to support task resource tracking. We just need to make sure that + * the ThreadPool on which the task runs on have runnable wrapper similar to + * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newResizable} + * + * @return true if resource tracking is supported by the task + */ + public boolean supportsResourceTracking() { + return false; + } + /** * Report of the internal status of a task. These can vary wildly from task * to task because each task is implemented differently but we should try @@ -370,4 +408,63 @@ public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOE throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } } + + /** + * Registers a task resource tracking completion listener on this task if resource tracking is still active. + * Returns true on successful subscription, false otherwise. + */ + public boolean addResourceTrackingCompletionListener(NotifyOnceListener listener) { + if (numActiveResourceTrackingThreads.get() > 0) { + resourceTrackingCompletionListeners.add(listener); + return true; + } + + return false; + } + + /** + * Increments the number of active resource tracking threads. + * + * @return the number of active resource tracking threads. + */ + public int incrementResourceTrackingThreads() { + return numActiveResourceTrackingThreads.incrementAndGet(); + } + + /** + * Decrements the number of active resource tracking threads. + * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's + * own thread as complete). 
When the active thread count becomes zero, the onTaskResourceTrackingCompleted method + * is called exactly once on all registered listeners. + * + * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce + * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). + * This ensures that the number of active threads doesn't drop to zero pre-maturely. + * + * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed + * with the response, any thread usage info captured after the task is unregistered may be irrelevant. + * + * @return the number of active resource tracking threads. + */ + public int decrementResourceTrackingThreads() { + int count = numActiveResourceTrackingThreads.decrementAndGet(); + + if (count == 0) { + List listenerExceptions = new ArrayList<>(); + resourceTrackingCompletionListeners.forEach(listener -> { + try { + listener.onResponse(this); + } catch (Exception e1) { + try { + listener.onFailure(e1); + } catch (Exception e2) { + listenerExceptions.add(e2); + } + } + }); + ExceptionsHelper.maybeThrowRuntimeAndSuppress(listenerExceptions); + } + + return count; + } } diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 9bb931ea7f0aa..01b6b8ea603bf 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -44,6 +44,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; +import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.node.DiscoveryNode; @@ -91,7 +92,9 @@ public class TaskManager implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); - /** Rest headers that are copied to the task */ + /** + * Rest headers that are copied to the task + */ private final List taskHeaders; private final ThreadPool threadPool; @@ -105,6 +108,7 @@ public class TaskManager implements ClusterStateApplier { private final Map banedParents = new ConcurrentHashMap<>(); private TaskResultsService taskResultsService; + private final SetOnce taskResourceTrackingService = new SetOnce<>(); private volatile DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES; @@ -127,6 +131,10 @@ public void setTaskCancellationService(TaskCancellationService taskCancellationS this.cancellationService.set(taskCancellationService); } + public void setTaskResourceTrackingService(TaskResourceTrackingService taskResourceTrackingService) { + this.taskResourceTrackingService.set(taskResourceTrackingService); + } + /** * Registers a task without parent task */ @@ -152,6 +160,30 @@ public Task register(String type, String action, TaskAwareRequest request) { logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription()); } + if (task.supportsResourceTracking()) { + boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { + @Override + protected void innerOnResponse(Task task) { + // Stop tracking the task once the last thread has been marked inactive. 
+ if (taskResourceTrackingService.get() != null && task.supportsResourceTracking()) { + taskResourceTrackingService.get().stopTracking(task); + } + } + + @Override + protected void innerOnFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }); + + if (success == false) { + logger.debug( + "failed to register a completion listener as task resource tracking has already completed [taskId={}]", + task.getId() + ); + } + } + if (task instanceof CancellableTask) { registerCancellableTask(task); } else { @@ -204,6 +236,10 @@ public void cancel(CancellableTask task, String reason, Runnable listener) { */ public Task unregister(Task task) { logger.trace("unregister task for id: {}", task.getId()); + + // Decrement the task's self-thread as part of unregistration. + task.decrementResourceTrackingThreads(); + if (task instanceof CancellableTask) { CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); if (holder != null) { @@ -363,6 +399,7 @@ public int getBanCount() { * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing. *

* This method is called when a parent task that has children is cancelled. + * * @return a list of pending cancellable child tasks */ public List setBan(TaskId parentTaskId, String reason) { @@ -450,6 +487,18 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { throw new OpenSearchTimeoutException("Timed out waiting for completion of [{}]", task); } + /** + * Takes actions when a task is registered and its execution starts + * + * @param task getting executed. + * @return AutoCloseable to free up resources (clean up thread context) when task execution block returns + */ + public ThreadContext.StoredContext taskExecutionStarted(Task task) { + if (taskResourceTrackingService.get() == null) return () -> {}; + + return taskResourceTrackingService.get().startTracking(task); + } + private static class CancellableTaskHolder { private final CancellableTask task; private boolean finished = false; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java new file mode 100644 index 0000000000000..c3cad117390e4 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import com.sun.management.ThreadMXBean; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; + +/** + * Service that helps track resource usage of tasks running on a node. 
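+ * A minimal usage sketch (illustrative only; "trackingService" and "task" are hypothetical variables, and the
+ * listener wiring through the ThreadPool is omitted):
+ *
+ *   ThreadContext.StoredContext ctx = trackingService.startTracking(task); // task id goes into the thread context
+ *   trackingService.refreshResourceStats(task);                            // snapshot threads still working on it
+ *   trackingService.stopTracking(task);                                    // normally invoked via the completion listener
+ *   ctx.restore();                                                         // put the original thread context back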
+ */ +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class TaskResourceTrackingService implements RunnableTaskExecutionListener { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + + public static final Setting TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting( + "task_resource_tracking.enabled", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final String TASK_ID = "TASK_ID"; + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + private final ConcurrentMapLong resourceAwareTasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final ThreadPool threadPool; + private volatile boolean taskResourceTrackingEnabled; + + @Inject + public TaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings); + this.threadPool = threadPool; + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled); + } + + public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) { + this.taskResourceTrackingEnabled = taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingEnabled() { + return taskResourceTrackingEnabled; + } + + public boolean isTaskResourceTrackingSupported() { + return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled(); + } + + /** + * Executes logic only if task supports resource tracking and resource tracking setting is enabled. + *

+     * 1. Starts tracking the task in the map of resourceAwareTasks.
+     * 2. Adds the task ID to the thread context to make sure it's available while the task is processed across multiple threads.
+     *
+     * @param task the task whose resources need to be tracked
+     * @return AutoCloseable stored context to restore the ThreadContext to the state before this method changed it.
+     */
+    public ThreadContext.StoredContext startTracking(Task task) {
+        if (task.supportsResourceTracking() == false
+            || isTaskResourceTrackingEnabled() == false
+            || isTaskResourceTrackingSupported() == false) {
+            return () -> {};
+        }
+
+        logger.debug("Starting resource tracking for task: {}", task.getId());
+        resourceAwareTasks.put(task.getId(), task);
+        return addTaskIdToThreadContext(task);
+    }
+
+    /**
+     * Stops tracking a task registered earlier for tracking.
+     *

+     * It doesn't have a feature-enabled check, to avoid any issues if the setting was disabled while the task was in progress.
+     *

+ * It's also responsible to stop tracking the current thread's resources against this task if not already done. + * This happens when the thread executing the request logic itself calls the unregister method. So in this case unregister + * happens before runnable finishes. + * + * @param task task which has finished and doesn't need resource tracking. + */ + public void stopTracking(Task task) { + logger.debug("Stopping resource tracking for task: {}", task.getId()); + try { + if (isCurrentThreadWorkingOnTask(task)) { + taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); + } + } catch (Exception e) { + logger.warn("Failed while trying to mark the task execution on current thread completed.", e); + assert false; + } finally { + resourceAwareTasks.remove(task.getId()); + } + } + + /** + * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these + * and how much resources these have consumed till now. + * + * @param tasks for which resource stats needs to be refreshed. + */ + public void refreshResourceStats(Task... tasks) { + if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { + return; + } + + for (Task task : tasks) { + if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { + refreshResourceStats(task); + } + } + } + + private void refreshResourceStats(Task resourceAwareTask) { + try { + logger.debug("Refreshing resource stats for Task: {}", resourceAwareTask.getId()); + List threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); + threadsWorkingOnTask.forEach( + threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) + ); + } catch (IllegalStateException e) { + logger.debug("Resource stats already updated."); + } + + } + + /** + * Called when a thread starts working on a task's runnable. + * + * @param taskId of the task for which runnable is starting + * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this + * thread + */ + @Override + public void taskExecutionStartedOnThread(long taskId, long threadId) { + try { + final Task task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("Task execution started on thread. Task: {}, Thread: {}", taskId, threadId); + task.startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); + assert false; + } + + } + + /** + * Called when a thread finishes working on a task's runnable. + * + * @param taskId of the task for which runnable is complete + * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread + */ + @Override + public void taskExecutionFinishedOnThread(long taskId, long threadId) { + try { + final Task task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("Task execution finished on thread. 
Task: {}, Thread: {}", taskId, threadId); + task.stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); + assert false; + } + } + + public Map getResourceAwareTasks() { + return Collections.unmodifiableMap(resourceAwareTasks); + } + + private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { + ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( + ResourceStats.MEMORY, + threadMXBean.getThreadAllocatedBytes(threadId) + ); + ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); + return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; + } + + private boolean isCurrentThreadWorkingOnTask(Task task) { + long threadId = Thread.currentThread().getId(); + List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); + + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + return true; + } + } + return false; + } + + private List getThreadsWorkingOnTask(Task task) { + List activeThreads = new ArrayList<>(); + for (List threadResourceInfos : task.getResourceStats().values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + activeThreads.add(threadResourceInfo.getThreadId()); + } + } + } + return activeThreads; + } + + /** + * Adds Task Id in the ThreadContext. + *
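+     * (This is what lets a TaskAwareRunnable executing on a worker thread, e.g. on the SEARCH pool, look up the
+     * owning task and attribute that thread's CPU time and allocated bytes back to it.)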

+ * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext + * as well. + * + * @param task for which Task Id needs to be added in ThreadContext. + * @return StoredContext reference to restore the ThreadContext from which we created a new one. + * Caller can call context.restore() to get the existing ThreadContext back. + */ + private ThreadContext.StoredContext addTaskIdToThreadContext(Task task) { + ThreadContext threadContext = threadPool.getThreadContext(); + ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); + threadContext.putTransient(TASK_ID, task.getId()); + return storedContext; + } + +} diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java index a0b38649b6420..de49d86d1d5c4 100644 --- a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java +++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java @@ -17,11 +17,13 @@ * @opensearch.internal */ public class ThreadResourceInfo { + private final long threadId; private volatile boolean isActive = true; private final ResourceStatsType statsType; private final ResourceUsageInfo resourceUsageInfo; - public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + public ThreadResourceInfo(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + this.threadId = threadId; this.statsType = statsType; this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics); } @@ -45,12 +47,16 @@ public ResourceStatsType getStatsType() { return statsType; } + public long getThreadId() { + return threadId; + } + public ResourceUsageInfo getResourceUsageInfo() { return resourceUsageInfo; } @Override public String toString() { - return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive; + return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive + ", threadId=" + threadId; } } diff --git a/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java index fd9ca1d3b5f3b..23f8a8979f821 100644 --- a/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/ResizableExecutorBuilder.java @@ -20,6 +20,7 @@ import java.util.Locale; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicReference; /** * A builder for resizable executors. @@ -30,12 +31,26 @@ public final class ResizableExecutorBuilder extends ExecutorBuilder sizeSetting; private final Setting queueSizeSetting; + private final AtomicReference runnableTaskListener; - ResizableExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) { - this(settings, name, size, queueSize, "thread_pool." + name); + ResizableExecutorBuilder( + final Settings settings, + final String name, + final int size, + final int queueSize, + final AtomicReference runnableTaskListener + ) { + this(settings, name, size, queueSize, "thread_pool." 
+ name, runnableTaskListener);
     }
 
-    public ResizableExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) {
+    public ResizableExecutorBuilder(
+        final Settings settings,
+        final String name,
+        final int size,
+        final int queueSize,
+        final String prefix,
+        final AtomicReference<RunnableTaskExecutionListener> runnableTaskListener
+    ) {
         super(name);
         final String sizeKey = settingsKey(prefix, "size");
         this.sizeSetting = new Setting<>(
@@ -50,6 +65,7 @@ public ResizableExecutorBuilder(final Settings settings, final String name, fina
             queueSize,
             new Setting.Property[] { Setting.Property.NodeScope, Setting.Property.Dynamic }
         );
+        this.runnableTaskListener = runnableTaskListener;
     }
 
     @Override
@@ -77,7 +93,8 @@ ThreadPool.ExecutorHolder build(final ResizableExecutorSettings settings, final
             size,
             queueSize,
             threadFactory,
-            threadContext
+            threadContext,
+            runnableTaskListener
         );
         final ThreadPool.Info info = new ThreadPool.Info(
             name(),
diff --git a/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java
new file mode 100644
index 0000000000000..03cd66f80d044
--- /dev/null
+++ b/server/src/main/java/org/opensearch/threadpool/RunnableTaskExecutionListener.java
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.threadpool;
+
+/**
+ * Listener for events when a runnable execution starts or finishes on a thread and is aware of the task with which the
+ * runnable is associated.
+ */
+public interface RunnableTaskExecutionListener {
+
+    /**
+     * Sends an update whenever a task's execution starts on a thread
+     *
+     * @param taskId of task which has started
+     * @param threadId of thread which is executing the task
+     */
+    void taskExecutionStartedOnThread(long taskId, long threadId);
+
+    /**
+     *
+     * Sends an update when task execution finishes on a thread
+     *
+     * @param taskId of task which has finished
+     * @param threadId of thread which executed the task
+     */
+    void taskExecutionFinishedOnThread(long taskId, long threadId);
+}
diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java
new file mode 100644
index 0000000000000..183b9b2f4cf9a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.threadpool;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.common.util.concurrent.AbstractRunnable;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.common.util.concurrent.WrappedRunnable;
+
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.lang.Thread.currentThread;
+import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID;
+
+/**
+ * Responsible for wrapping the original task's runnable and notifying the registered listener when
+ * execution of that runnable starts and finishes.
+ *
+ * It associates the runnable with a task via the task id available in the thread context.
+ */
+public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable {
+
+    private static final Logger logger = LogManager.getLogger(TaskAwareRunnable.class);
+
+    private final Runnable original;
+    private final ThreadContext threadContext;
+    private final AtomicReference<RunnableTaskExecutionListener> runnableTaskListener;
+
+    public TaskAwareRunnable(
+        final ThreadContext threadContext,
+        final Runnable original,
+        final AtomicReference<RunnableTaskExecutionListener> runnableTaskListener
+    ) {
+        this.original = original;
+        this.threadContext = threadContext;
+        this.runnableTaskListener = runnableTaskListener;
+    }
+
+    @Override
+    public void onFailure(Exception e) {
+        ExceptionsHelper.reThrowIfNotNull(e);
+    }
+
+    @Override
+    public boolean isForceExecution() {
+        return original instanceof AbstractRunnable && ((AbstractRunnable) original).isForceExecution();
+    }
+
+    @Override
+    public void onRejection(final Exception e) {
+        if (original instanceof AbstractRunnable) {
+            ((AbstractRunnable) original).onRejection(e);
+        } else {
+            ExceptionsHelper.reThrowIfNotNull(e);
+        }
+    }
+
+    @Override
+    protected void doRun() throws Exception {
+        assert runnableTaskListener.get() != null : "Listener should be attached";
+        Long taskId = threadContext.getTransient(TASK_ID);
+        if (Objects.nonNull(taskId)) {
+            runnableTaskListener.get().taskExecutionStartedOnThread(taskId, currentThread().getId());
+        } else {
+            logger.debug("Task Id not available in thread context. Skipping update. Thread Info: {}", Thread.currentThread());
+        }
+        try {
+            original.run();
+        } finally {
+            if (Objects.nonNull(taskId)) {
+                runnableTaskListener.get().taskExecutionFinishedOnThread(taskId, currentThread().getId());
+            }
+        }
+    }
+
+    @Override
+    public Runnable unwrap() {
+        return original;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
index cc8d81d2a7b4b..928b4871590c6 100644
--- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
@@ -69,6 +69,7 @@
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 
 import static java.util.Collections.unmodifiableMap;
@@ -200,6 +201,14 @@ public Collection<ExecutorBuilder> builders() {
     );
 
     public ThreadPool(final Settings settings, final ExecutorBuilder<?>... customBuilders) {
+        this(settings, null, customBuilders);
+    }
+
+    public ThreadPool(
+        final Settings settings,
+        final AtomicReference<RunnableTaskExecutionListener> runnableTaskListener,
+        final ExecutorBuilder<?>...
customBuilders + ) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -211,8 +220,11 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui builders.put(Names.WRITE, new FixedExecutorBuilder(settings, Names.WRITE, allocatedProcessors, 10000)); builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, allocatedProcessors, 1000)); builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); - builders.put(Names.SEARCH, new ResizableExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000)); - builders.put(Names.SEARCH_THROTTLED, new ResizableExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100)); + builders.put( + Names.SEARCH, + new ResizableExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(allocatedProcessors), 1000, runnableTaskListener) + ); + builders.put(Names.SEARCH_THROTTLED, new ResizableExecutorBuilder(settings, Names.SEARCH_THROTTLED, 1, 100, runnableTaskListener)); builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded // the assumption here is that the listeners should be very lightweight on the listeners side diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index b65b72b745a01..dbd6f651a6b3c 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -37,6 +37,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -86,6 +87,8 @@ public Request newRequest(StreamInput in) throws IOException { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = taskManager.register(channel.getChannelType(), action, request); + ThreadContext.StoredContext contextToRestore = taskManager.taskExecutionStarted(task); + Releasable unregisterTask = () -> taskManager.unregister(task); try { if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { @@ -104,6 +107,7 @@ public void processMessageReceived(Request request, TransportChannel channel) th unregisterTask = null; } finally { Releasables.close(unregisterTask); + contextToRestore.restore(); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java index 7756eb12bb3f4..9bd44185baf24 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java @@ -75,6 +75,9 @@ public synchronized void onTaskUnregistered(Task task) { @Override public void waitForTaskCompletion(Task task) {} + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) {} + public synchronized List> getEvents() { return 
Collections.unmodifiableList(new ArrayList<>(events)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java new file mode 100644 index 0000000000000..654d5cde7bb00 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -0,0 +1,653 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.node.tasks; + +import com.sun.management.ThreadMXBean; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; +import org.opensearch.action.NotifyOnceListener; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.nodes.BaseNodeRequest; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskId; +import org.opensearch.tasks.TaskInfo; +import org.opensearch.test.tasks.MockTaskManager; +import org.opensearch.test.tasks.MockTaskManagerListener; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") +public class ResourceAwareTasksTests extends TaskManagerTestCase { + + private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); + + public static class ResourceAwareNodeRequest extends BaseNodeRequest { + protected String requestName; + + public ResourceAwareNodeRequest() { + super(); + } + + public ResourceAwareNodeRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + } + + public ResourceAwareNodeRequest(NodesRequest request) { + requestName = request.requestName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(requestName); + } + + @Override + public String getDescription() { + return "ResourceAwareNodeRequest[" + requestName + "]"; + } + 
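// createTask returns a CancellableTask whose supportsResourceTracking() returns true, opting this
+ // node-level request in to per-thread CPU and memory tracking by the TaskResourceTrackingService.
+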
+ @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return false; + } + + @Override + public boolean supportsResourceTracking() { + return true; + } + }; + } + } + + public static class NodesRequest extends BaseNodesRequest { + private final String requestName; + + private NodesRequest(StreamInput in) throws IOException { + super(in); + requestName = in.readString(); + } + + public NodesRequest(String requestName, String... nodesIds) { + super(nodesIds); + this.requestName = requestName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(requestName); + } + + @Override + public String getDescription() { + return "NodesRequest[" + requestName + "]"; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } + }; + } + } + + /** + * Simulates a task which executes work on search executor. + */ + class ResourceAwareNodesAction extends AbstractTestNodesAction { + private final TaskTestContext taskTestContext; + private final boolean blockForCancellation; + + ResourceAwareNodesAction( + String actionName, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + boolean shouldBlock, + TaskTestContext taskTestContext + ) { + super(actionName, threadPool, clusterService, transportService, NodesRequest::new, ResourceAwareNodeRequest::new); + this.taskTestContext = taskTestContext; + this.blockForCancellation = shouldBlock; + } + + @Override + protected ResourceAwareNodeRequest newNodeRequest(NodesRequest request) { + return new ResourceAwareNodeRequest(request); + } + + @Override + protected NodeResponse nodeOperation(ResourceAwareNodeRequest request, Task task) { + assert task.supportsResourceTracking(); + + AtomicLong threadId = new AtomicLong(); + Future result = threadPool.executor(ThreadPool.Names.SEARCH).submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + + @Override + @SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") + protected void doRun() { + taskTestContext.memoryConsumptionWhenExecutionStarts = threadMXBean.getThreadAllocatedBytes( + Thread.currentThread().getId() + ); + threadId.set(Thread.currentThread().getId()); + + // operationStartValidator will be called just before the task execution. + if (taskTestContext.operationStartValidator != null) { + taskTestContext.operationStartValidator.accept(task, threadId.get()); + } + + // operationFinishedValidator will be called just after all task threads are marked inactive and + // the task is unregistered. 
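+ // The check below registers a NotifyOnceListener for that purpose; registration returns false
+ // (failing the test) if resource tracking for the task has already completed.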
+ if (taskTestContext.operationFinishedValidator != null) { + boolean success = task.addResourceTrackingCompletionListener(new NotifyOnceListener<>() { + @Override + protected void innerOnResponse(Task task) { + taskTestContext.operationFinishedValidator.accept(task, threadId.get()); + } + + @Override + protected void innerOnFailure(Exception e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }); + + if (success == false) { + fail("failed to register a completion listener as task resource tracking has already completed"); + } + } + + Object[] allocation1 = new Object[1000000]; // 4MB + + if (blockForCancellation) { + // Simulate a job that takes forever to finish + // Using periodic checks method to identify that the task was cancelled + try { + boolean taskCancelled = waitUntil(((CancellableTask) task)::isCancelled); + if (taskCancelled) { + throw new TaskCancelledException("Task Cancelled"); + } else { + fail("It should have thrown an exception"); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + + } + + Object[] allocation2 = new Object[1000000]; // 4MB + } + }); + + try { + result.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e.getCause()); + } + + return new NodeResponse(clusterService.localNode()); + } + + @Override + protected NodeResponse nodeOperation(ResourceAwareNodeRequest request) { + throw new UnsupportedOperationException("the task parameter is required"); + } + } + + private TaskTestContext startResourceAwareNodesAction( + TestNode node, + boolean blockForCancellation, + TaskTestContext taskTestContext, + ActionListener listener + ) { + NodesRequest request = new NodesRequest("Test Request", node.getNodeId()); + + taskTestContext.requestCompleteLatch = new CountDownLatch(1); + + ResourceAwareNodesAction action = new ResourceAwareNodesAction( + "internal:resourceAction", + threadPool, + node.clusterService, + node.transportService, + blockForCancellation, + taskTestContext + ); + taskTestContext.mainTask = action.execute(request, listener); + return taskTestContext; + } + + private static class TaskTestContext { + private Task mainTask; + private CountDownLatch requestCompleteLatch; + private BiConsumer operationStartValidator; + private BiConsumer operationFinishedValidator; + private long memoryConsumptionWhenExecutionStarts; + } + + public void testBasicTaskResourceTracking() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { + // Thread has finished working on the task's runnable + assertEquals(0, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + 
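// The thread entry should now be inactive and the task's totals should include the memory
+ // allocated by the runnable, which the assertions below verify.
+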
assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + long expectedArrayAllocationOverhead = 2 * 4000000; // Task's memory overhead due to array allocations + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + assertMemoryUsageWithinLimits( + actualTaskMemoryOverhead - taskTestContext.memoryConsumptionWhenExecutionStarts, + expectedArrayAllocationOverhead + ); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingDuringTaskCancellation() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { + // Thread has finished working on the task's runnable + assertEquals(0, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + // allocations are completed before the task is cancelled + long expectedArrayAllocationOverhead = 4000000; // Task's memory overhead due to array allocations + long taskCancellationOverhead = 30000; // Task cancellation overhead ~ 30Kb + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + long expectedOverhead = expectedArrayAllocationOverhead + taskCancellationOverhead; + assertMemoryUsageWithinLimits( + actualTaskMemoryOverhead - taskTestContext.memoryConsumptionWhenExecutionStarts, + expectedOverhead + ); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], true, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Cancel main task + CancelTasksRequest request = new CancelTasksRequest(); + request.setReason("Cancelling request to verify Task 
resource tracking behaviour"); + request.setTaskId(new TaskId(testNodes[0].getNodeId(), taskTestContext.mainTask.getId())); + ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertEquals(0, resourceTasks.size()); + assertNull(throwableReference.get()); + assertNotNull(responseReference.get()); + assertEquals(1, responseReference.get().failureCount()); + assertEquals(TaskCancelledException.class, findActualException(responseReference.get().failures().get(0)).getClass()); + } + + public void testTaskResourceTrackingDisabled() throws Exception { + setup(false, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingDisabledWhileTaskInProgress() throws Exception { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + // One thread is currently working on task but not finished + assertEquals(1, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getTotalResourceStats().getCpuTimeInNanos()); + assertEquals(0, task.getTotalResourceStats().getMemoryInBytes()); + + testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(false); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { + // Thread has finished working on the task's runnable + assertEquals(0, resourceTasks.size()); + assertEquals(1, task.getResourceStats().size()); + assertEquals(1, task.getResourceStats().get(threadId).size()); + assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + + long expectedArrayAllocationOverhead = 2 * 4000000; // Task's memory overhead due to array allocations + long actualTaskMemoryOverhead = task.getTotalResourceStats().getMemoryInBytes(); + + assertMemoryUsageWithinLimits( + actualTaskMemoryOverhead - taskTestContext.memoryConsumptionWhenExecutionStarts, + 
expectedArrayAllocationOverhead + ); + assertTrue(task.getTotalResourceStats().getCpuTimeInNanos() > 0); + }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskResourceTrackingEnabledWhileTaskInProgress() throws Exception { + setup(false, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + assertEquals(0, resourceTasks.size()); + + testNodes[0].taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException { + setup(true, false); + + final AtomicReference throwableReference = new AtomicReference<>(); + final AtomicReference responseReference = new AtomicReference<>(); + + TaskTestContext taskTestContext = new TaskTestContext(); + + Map resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( + testNodes[0].transportListTasksAction, + new ListTasksRequest().setActions("internal:resourceAction*").setDetailed(true) + ); + + TaskInfo taskInfo = listTasksResponse.getTasks().get(1); + + assertNotNull(taskInfo.getResourceStats()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); + assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getCpuTimeInNanos() > 0); + assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total").getMemoryInBytes() > 0); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + 
public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testTaskIdPersistsInThreadContext() throws InterruptedException { + setup(true, true); + + final List taskIdsAddedToThreadContext = new ArrayList<>(); + final List taskIdsRemovedFromThreadContext = new ArrayList<>(); + AtomicLong actualTaskIdInThreadContext = new AtomicLong(-1); + AtomicLong expectedTaskIdInThreadContext = new AtomicLong(-2); + + ((MockTaskManager) testNodes[0].transportService.getTaskManager()).addListener(new MockTaskManagerListener() { + @Override + public void waitForTaskCompletion(Task task) {} + + @Override + public void taskExecutionStarted(Task task, Boolean closeableInvoked) { + if (closeableInvoked) { + taskIdsRemovedFromThreadContext.add(task.getId()); + } else { + taskIdsAddedToThreadContext.add(task.getId()); + } + } + + @Override + public void onTaskRegistered(Task task) {} + + @Override + public void onTaskUnregistered(Task task) { + if (task.getAction().equals("internal:resourceAction[n]")) { + expectedTaskIdInThreadContext.set(task.getId()); + actualTaskIdInThreadContext.set(threadPool.getThreadContext().getTransient(TASK_ID)); + } + } + }); + + TaskTestContext taskTestContext = new TaskTestContext(); + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + taskTestContext.requestCompleteLatch.await(); + + assertEquals(expectedTaskIdInThreadContext.get(), actualTaskIdInThreadContext.get()); + assertThat(taskIdsAddedToThreadContext, containsInAnyOrder(taskIdsRemovedFromThreadContext.toArray())); + } + + private void setup(boolean resourceTrackingEnabled, boolean useMockTaskManager) { + Settings settings = Settings.builder() + .put("task_resource_tracking.enabled", resourceTrackingEnabled) + .put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), useMockTaskManager) + .build(); + setupTestNodes(settings); + connectNodes(testNodes[0]); + + runnableTaskListener.set(testNodes[0].taskResourceTrackingService); + } + + private Throwable findActualException(Exception e) { + Throwable throwable = e.getCause(); + while (throwable.getCause() != null) { + throwable = throwable.getCause(); + } + return throwable; + } + + private void assertTasksRequestFinishedSuccessfully(NodesResponse nodesResponse, Throwable throwable) { + assertNull(throwable); + assertNotNull(nodesResponse); + assertEquals(0, nodesResponse.failureCount()); + } + + private void assertMemoryUsageWithinLimits(long actual, long expected) { + // 5% buffer up to 200 KB to account for classloading overhead. 
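+ // Math.min picks the smaller of the absolute cap (200 KB) and the relative 5% buffer, so small
+ // expected allocations get a proportional allowance while large ones are capped at 200 KB.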
+ long maxOverhead = Math.min(200000, expected * 5 / 100); + assertThat(actual, lessThanOrEqualTo(expected + maxOverhead)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 4383b21aa7e74..fd6f5d17a3a80 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -59,8 +59,10 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -74,6 +76,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import static java.util.Collections.emptyMap; @@ -89,10 +92,12 @@ public abstract class TaskManagerTestCase extends OpenSearchTestCase { protected ThreadPool threadPool; protected TestNode[] testNodes; protected int nodesCount; + protected AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); + runnableTaskListener = new AtomicReference<>(); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); } public void setupTestNodes(Settings settings) { @@ -225,14 +230,22 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool transportService.start(); clusterService = createClusterService(threadPool, discoveryNode.get()); clusterService.addStateApplier(transportService.getTaskManager()); + taskResourceTrackingService = new TaskResourceTrackingService(settings, clusterService.getClusterSettings(), threadPool); + transportService.getTaskManager().setTaskResourceTrackingService(taskResourceTrackingService); ActionFilters actionFilters = new ActionFilters(emptySet()); - transportListTasksAction = new TransportListTasksAction(clusterService, transportService, actionFilters); + transportListTasksAction = new TransportListTasksAction( + clusterService, + transportService, + actionFilters, + taskResourceTrackingService + ); transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters); transportService.acceptIncomingRequests(); } public final ClusterService clusterService; public final TransportService transportService; + public final TaskResourceTrackingService taskResourceTrackingService; private final SetOnce discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 4b98870422ce8..202f1b7dcb5b4 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ 
b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -91,6 +91,7 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Answers.RETURNS_MOCKS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyString; @@ -224,7 +225,7 @@ public void setupAction() { remoteResponseHandler = ArgumentCaptor.forClass(TransportResponseHandler.class); // setup services that will be called by action - transportService = mock(TransportService.class); + transportService = mock(TransportService.class, RETURNS_MOCKS); clusterService = mock(ClusterService.class); localIngest = true; // setup nodes for local and remote diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 9c70accaca3e4..64286e47b4966 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -48,6 +48,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.sameInstance; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; public class ThreadContextTests extends OpenSearchTestCase { @@ -154,6 +155,15 @@ public void testNewContextWithClearedTransients() { assertEquals(1, threadContext.getResponseHeaders().get("baz").size()); } + public void testStashContextWithPreservedTransients() { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient("foo", "bar"); + threadContext.putTransient(TASK_ID, 1); + threadContext.stashContext(); + assertNull(threadContext.getTransient("foo")); + assertEquals(1, (int) threadContext.getTransient(TASK_ID)); + } + public void testStashWithOrigin() { final String origin = randomAlphaOfLengthBetween(4, 16); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 39f86942f2c4b..4b8eec70f2c1a 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -203,6 +203,7 @@ import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1754,6 +1755,8 @@ public void onFailure(final Exception e) { final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); + transportService.getTaskManager() + .setTaskResourceTrackingService(new TaskResourceTrackingService(settings, clusterSettings, threadPool)); repositoriesService = new RepositoriesService( settings, clusterService, diff --git a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java index 0f09b0de34206..ab49109eb8247 
100644 --- a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.FakeTcpChannel; @@ -59,6 +60,7 @@ import java.util.Set; import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -67,10 +69,12 @@ public class TaskManagerTests extends OpenSearchTestCase { private ThreadPool threadPool; + private AtomicReference runnableTaskListener; @Before public void setupThreadPool() { - threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); + runnableTaskListener = new AtomicReference<>(); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), runnableTaskListener); } @After diff --git a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java new file mode 100644 index 0000000000000..8ba23c5d3219c --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; +import org.opensearch.action.search.SearchTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.tasks.ResourceStats.MEMORY; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +public class TaskResourceTrackingServiceTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private TaskResourceTrackingService taskResourceTrackingService; + + @Before + public void setup() { + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName(), new AtomicReference<>()); + taskResourceTrackingService = new TaskResourceTrackingService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @After + public void terminateThreadPool() { + terminate(threadPool); + } + + public void testThreadContextUpdateOnTrackingStart() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + + Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + + String key = "KEY"; + String value = "VALUE"; + + // Prepare thread context + threadPool.getThreadContext().putHeader(key, value); + threadPool.getThreadContext().putTransient(key, value); + threadPool.getThreadContext().addResponseHeader(key, value); + + ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); + + // All headers should be preserved and Task Id should also be included in thread context + verifyThreadContextFixedHeaders(key, value); + assertEquals((long) threadPool.getThreadContext().getTransient(TASK_ID), task.getId()); + + storedContext.restore(); + + // Post restore only task id should be removed from the thread context + verifyThreadContextFixedHeaders(key, value); + assertNull(threadPool.getThreadContext().getTransient(TASK_ID)); + } + + public void testStopTrackingHandlesCurrentActiveThread() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + Task task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + ThreadContext.StoredContext storedContext = taskResourceTrackingService.startTracking(task); + long threadId = Thread.currentThread().getId(); + taskResourceTrackingService.taskExecutionStartedOnThread(task.getId(), threadId); + + assertTrue(task.getResourceStats().get(threadId).get(0).isActive()); + assertEquals(0, task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue()); + + taskResourceTrackingService.stopTracking(task); + + // Makes sure stop tracking marks the current active thread inactive and refreshes the resource stats before returning. 
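+ // The thread's entry should now be inactive, and the final refresh should have captured the
+ // memory allocated by this thread while tracking was active, making the total positive.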
+ assertFalse(task.getResourceStats().get(threadId).get(0).isActive()); + assertTrue(task.getResourceStats().get(threadId).get(0).getResourceUsageInfo().getStatsInfo().get(MEMORY).getTotalValue() > 0); + } + + private void verifyThreadContextFixedHeaders(String key, String value) { + assertEquals(threadPool.getThreadContext().getHeader(key), value); + assertEquals(threadPool.getThreadContext().getTransient(key), value); + assertEquals(threadPool.getThreadContext().getResponseHeaders().get(key).get(0), value); + } + +} diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java index e60871f67ea54..677ec7a0a6600 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManager.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; import org.opensearch.tasks.TaskManager; @@ -127,6 +128,21 @@ public void waitForTaskCompletion(Task task, long untilInNanos) { super.waitForTaskCompletion(task, untilInNanos); } + @Override + public ThreadContext.StoredContext taskExecutionStarted(Task task) { + for (MockTaskManagerListener listener : listeners) { + listener.taskExecutionStarted(task, false); + } + + ThreadContext.StoredContext storedContext = super.taskExecutionStarted(task); + return () -> { + for (MockTaskManagerListener listener : listeners) { + listener.taskExecutionStarted(task, true); + } + storedContext.restore(); + }; + } + public void addListener(MockTaskManagerListener listener) { listeners.add(listener); } diff --git a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java index eb8361ac552fc..f15f878995aa2 100644 --- a/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java +++ b/test/framework/src/main/java/org/opensearch/test/tasks/MockTaskManagerListener.java @@ -43,4 +43,7 @@ public interface MockTaskManagerListener { void onTaskUnregistered(Task task); void waitForTaskCompletion(Task task); + + void taskExecutionStarted(Task task, Boolean closeableInvoked); + } diff --git a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java index 5f8611d99f0a0..2d97d5bffee01 100644 --- a/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/opensearch/threadpool/TestThreadPool.java @@ -40,6 +40,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicReference; public class TestThreadPool extends ThreadPool { @@ -47,12 +48,29 @@ public class TestThreadPool extends ThreadPool { private volatile boolean returnRejectingExecutor = false; private volatile ThreadPoolExecutor rejectingExecutor; + public TestThreadPool( + String name, + AtomicReference runnableTaskListener, + ExecutorBuilder... 
customBuilders + ) { + this(name, Settings.EMPTY, runnableTaskListener, customBuilders); + } + public TestThreadPool(String name, ExecutorBuilder... customBuilders) { this(name, Settings.EMPTY, customBuilders); } public TestThreadPool(String name, Settings settings, ExecutorBuilder... customBuilders) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); + this(name, settings, null, customBuilders); + } + + public TestThreadPool( + String name, + Settings settings, + AtomicReference runnableTaskListener, + ExecutorBuilder... customBuilders + ) { + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), runnableTaskListener, customBuilders); } @Override From fcdea3ab47b9f4ace8dff21f49cb2dcc4b98e9ad Mon Sep 17 00:00:00 2001 From: Bin Wei Date: Tue, 2 Aug 2022 20:38:44 +0800 Subject: [PATCH 055/104] Rename Class ending with Plugin to Module under modules dir (#4042) * Rename Class ending with Plugin to Module under modules dir (#4034) Signed-off-by: rockybean * Rename from Module to ModulePlugin Signed-off-by: rockybean --- modules/aggs-matrix-stats/build.gradle | 2 +- ...ava => MatrixAggregationModulePlugin.java} | 2 +- .../stats/InternalMatrixStatsTests.java | 4 +-- .../stats/MatrixStatsAggregatorTests.java | 4 +-- modules/analysis-common/build.gradle | 2 +- .../common/QueryStringWithAnalyzersIT.java | 2 +- ...n.java => CommonAnalysisModulePlugin.java} | 4 +-- .../ASCIIFoldingTokenFilterFactoryTests.java | 4 +-- ...rdDelimiterTokenFilterFactoryTestCase.java | 14 +++++----- .../common/CJKFilterFactoryTests.java | 2 +- .../common/CommonAnalysisFactoryTests.java | 2 +- .../CommonGramsTokenFilterFactoryTests.java | 4 +-- .../common/CompoundAnalysisTests.java | 4 +-- ...ncatenateGraphTokenFilterFactoryTests.java | 16 ++++++------ .../common/DisableGraphQueryTests.java | 2 +- .../EdgeNGramTokenFilterFactoryTests.java | 4 +-- .../common/EdgeNGramTokenizerTests.java | 2 +- .../common/ElisionFilterFactoryTests.java | 2 +- .../common/HighlighterWithAnalyzersTests.java | 2 +- .../common/KeepFilterFactoryTests.java | 12 ++++----- .../common/KeepTypesFilterFactoryTests.java | 12 ++++++--- .../KeywordMarkerFilterFactoryTests.java | 6 ++--- .../LimitTokenCountFilterFactoryTests.java | 2 +- .../analysis/common/MassiveWordListTests.java | 2 +- .../common/MinHashFilterFactoryTests.java | 10 +++++-- .../common/MultiplexerTokenFilterTests.java | 4 +-- .../common/NGramTokenFilterFactoryTests.java | 4 +-- .../PatternCaptureTokenFilterTests.java | 2 +- .../PredicateTokenScriptFilterTests.java | 2 +- .../RemoveDuplicatesFilterFactoryTests.java | 5 +++- .../ScriptedConditionTokenFilterTests.java | 2 +- .../common/ShingleTokenFilterTests.java | 2 +- ...temmerOverrideTokenFilterFactoryTests.java | 2 +- .../StemmerTokenFilterFactoryTests.java | 2 +- .../common/SynonymsAnalysisTests.java | 22 ++++++++-------- .../analysis/common/TrimTokenFilterTests.java | 5 +++- ...DelimiterGraphTokenFilterFactoryTests.java | 15 ++++++----- .../WordDelimiterTokenFilterFactoryTests.java | 2 +- modules/geo/build.gradle | 2 +- .../{GeoPlugin.java => GeoModulePlugin.java} | 2 +- modules/ingest-common/build.gradle | 2 +- .../ingest/common/IngestRestartIT.java | 2 +- ...gin.java => IngestCommonModulePlugin.java} | 4 +-- modules/ingest-geoip/build.gradle | 2 +- .../geoip/GeoIpProcessorNonIngestNodeIT.java | 2 +- .../ingest/geoip/GeoIpProcessor.java | 2 +- ...ugin.java => IngestGeoIpModulePlugin.java} | 2 +- .../geoip/GeoIpProcessorFactoryTests.java | 
18 ++++++------- .../ingest/geoip/GeoIpProcessorTests.java | 2 +- ...java => IngestGeoIpModulePluginTests.java} | 4 +-- modules/ingest-user-agent/build.gradle | 2 +- ....java => IngestUserAgentModulePlugin.java} | 4 +-- .../ingest/useragent/UserAgentProcessor.java | 8 +++++- .../UserAgentProcessorFactoryTests.java | 2 +- modules/lang-expression/build.gradle | 2 +- .../script/expression/MoreExpressionIT.java | 2 +- .../script/expression/StoredExpressionIT.java | 2 +- ...lugin.java => ExpressionModulePlugin.java} | 2 +- modules/lang-mustache/build.gradle | 2 +- .../mustache/MultiSearchTemplateIT.java | 2 +- .../script/mustache/SearchTemplateIT.java | 2 +- ...ePlugin.java => MustacheModulePlugin.java} | 2 +- modules/lang-painless/build.gradle | 2 +- ...sPlugin.java => PainlessModulePlugin.java} | 2 +- .../action/PainlessExecuteApiTests.java | 4 +-- modules/mapper-extras/build.gradle | 2 +- ...gin.java => MapperExtrasModulePlugin.java} | 2 +- .../index/mapper/BWCTemplateTests.java | 2 +- .../mapper/RankFeatureFieldMapperTests.java | 2 +- .../RankFeatureMetaFieldMapperTests.java | 2 +- .../mapper/RankFeaturesFieldMapperTests.java | 2 +- .../mapper/ScaledFloatFieldMapperTests.java | 2 +- .../SearchAsYouTypeFieldMapperTests.java | 2 +- .../mapper/TokenCountFieldMapperTests.java | 2 +- .../query/RankFeatureQueryBuilderTests.java | 4 +-- modules/opensearch-dashboards/build.gradle | 2 +- ... => OpenSearchDashboardsModulePlugin.java} | 2 +- ...penSearchDashboardsModulePluginTests.java} | 14 +++++----- modules/parent-join/build.gradle | 2 +- .../join/query/ParentChildTestCase.java | 4 +-- ...lugin.java => ParentJoinModulePlugin.java} | 4 +-- .../join/aggregations/ChildrenTests.java | 4 +-- .../ChildrenToParentAggregatorTests.java | 4 +-- .../aggregations/InternalChildrenTests.java | 4 +-- .../aggregations/InternalParentTests.java | 4 +-- .../join/aggregations/ParentTests.java | 4 +-- .../ParentToChildrenAggregatorTests.java | 4 +-- .../mapper/ParentJoinFieldMapperTests.java | 4 +-- .../join/query/HasChildQueryBuilderTests.java | 4 +-- .../query/HasParentQueryBuilderTests.java | 4 +-- .../join/query/ParentIdQueryBuilderTests.java | 4 +-- modules/percolator/build.gradle | 2 +- .../percolator/PercolatorQuerySearchIT.java | 4 +-- ...lugin.java => PercolatorModulePlugin.java} | 2 +- .../percolator/CandidateQueryTests.java | 2 +- .../PercolateQueryBuilderTests.java | 2 +- .../PercolatorFieldMapperTests.java | 9 +++++-- .../PercolatorQuerySearchTests.java | 2 +- modules/rank-eval/build.gradle | 2 +- .../index/rankeval/RankEvalRequestIT.java | 2 +- ...lPlugin.java => RankEvalModulePlugin.java} | 2 +- .../index/rankeval/EvalQueryQualityTests.java | 6 +++-- .../index/rankeval/RankEvalRequestTests.java | 8 +++--- .../index/rankeval/RankEvalSpecTests.java | 2 +- modules/reindex/build.gradle | 2 +- .../documentation/ReindexDocumentationIT.java | 4 +-- ...exPlugin.java => ReindexModulePlugin.java} | 6 ++--- .../reindex/spi/RemoteReindexExtension.java | 3 ++- .../ReindexFromRemoteWithAuthTests.java | 6 ++--- .../reindex/ReindexRenamedSettingTests.java | 2 +- .../index/reindex/ReindexSingleNodeTests.java | 2 +- .../index/reindex/ReindexTestCase.java | 2 +- .../opensearch/index/reindex/RetryTests.java | 4 +-- modules/repository-url/build.gradle | 2 +- .../url/URLSnapshotRestoreIT.java | 4 +-- ...in.java => URLRepositoryModulePlugin.java} | 2 +- modules/systemd/build.gradle | 2 +- ...mdPlugin.java => SystemdModulePlugin.java} | 8 +++--- ...sts.java => SystemdModulePluginTests.java} | 26 +++++++++++-------- 
 modules/transport-netty4/build.gradle | 2 +-
 .../OpenSearchNetty4IntegTestCase.java | 8 +++---
 .../Netty4TransportPublishAddressIT.java | 4 +--
 ...ty4Plugin.java => Netty4ModulePlugin.java} | 2 +-
 .../opensearch/http/HttpSmokeTestCase.java | 18 ++++++-------
 124 files changed, 278 insertions(+), 241 deletions(-)
 rename modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/{MatrixAggregationPlugin.java => MatrixAggregationModulePlugin.java} (95%)
 rename modules/analysis-common/src/main/java/org/opensearch/analysis/common/{CommonAnalysisPlugin.java => CommonAnalysisModulePlugin.java} (99%)
 rename modules/geo/src/main/java/org/opensearch/geo/{GeoPlugin.java => GeoModulePlugin.java} (95%)
 rename modules/ingest-common/src/main/java/org/opensearch/ingest/common/{IngestCommonPlugin.java => IngestCommonModulePlugin.java} (98%)
 rename modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/{IngestGeoIpPlugin.java => IngestGeoIpModulePlugin.java} (99%)
 rename modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/{IngestGeoIpPluginTests.java => IngestGeoIpModulePluginTests.java} (95%)
 rename modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/{IngestUserAgentPlugin.java => IngestUserAgentModulePlugin.java} (96%)
 rename modules/lang-expression/src/main/java/org/opensearch/script/expression/{ExpressionPlugin.java => ExpressionModulePlugin.java} (95%)
 rename modules/lang-mustache/src/main/java/org/opensearch/script/mustache/{MustachePlugin.java => MustacheModulePlugin.java} (96%)
 rename modules/lang-painless/src/main/java/org/opensearch/painless/{PainlessPlugin.java => PainlessModulePlugin.java} (98%)
 rename modules/mapper-extras/src/main/java/org/opensearch/index/mapper/{MapperExtrasPlugin.java => MapperExtrasModulePlugin.java} (96%)
 rename modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/{OpenSearchDashboardsPlugin.java => OpenSearchDashboardsModulePlugin.java} (98%)
 rename modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/{OpenSearchDashboardsPluginTests.java => OpenSearchDashboardsModulePluginTests.java} (79%)
 rename modules/parent-join/src/main/java/org/opensearch/join/{ParentJoinPlugin.java => ParentJoinModulePlugin.java} (95%)
 rename modules/percolator/src/main/java/org/opensearch/percolator/{PercolatorPlugin.java => PercolatorModulePlugin.java} (96%)
 rename modules/rank-eval/src/main/java/org/opensearch/index/rankeval/{RankEvalPlugin.java => RankEvalModulePlugin.java} (98%)
 rename modules/reindex/src/main/java/org/opensearch/index/reindex/{ReindexPlugin.java => ReindexModulePlugin.java} (96%)
 rename modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/{URLRepositoryPlugin.java => URLRepositoryModulePlugin.java} (96%)
 rename modules/systemd/src/main/java/org/opensearch/systemd/{SystemdPlugin.java => SystemdModulePlugin.java} (96%)
 rename modules/systemd/src/test/java/org/opensearch/systemd/{SystemdPluginTests.java => SystemdModulePluginTests.java} (87%)
 rename modules/transport-netty4/src/main/java/org/opensearch/transport/{Netty4Plugin.java => Netty4ModulePlugin.java} (98%)

diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle
index dd3aee61f7664..705fa17456a79 100644
--- a/modules/aggs-matrix-stats/build.gradle
+++ b/modules/aggs-matrix-stats/build.gradle
@@ -31,7 +31,7 @@ apply plugin: 'opensearch.yaml-rest-test'
 
 opensearchplugin {
   description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.'
-  classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationPlugin'
+  classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin'
   hasClientJar = true
 }
 
diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationPlugin.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationModulePlugin.java
similarity index 95%
rename from modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationPlugin.java
rename to modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationModulePlugin.java
index debeacffe321e..df1926282d500 100644
--- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationPlugin.java
+++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/MatrixAggregationModulePlugin.java
@@ -42,7 +42,7 @@
 import static java.util.Collections.singletonList;
 
-public class MatrixAggregationPlugin extends Plugin implements SearchPlugin {
+public class MatrixAggregationModulePlugin extends Plugin implements SearchPlugin {
     @Override
     public List getAggregations() {
         return singletonList(
diff --git a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java
index e523c77e0786f..cc16b9b23b5d5 100644
--- a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java
+++ b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java
@@ -43,7 +43,7 @@
 import org.opensearch.search.aggregations.Aggregation;
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.ParsedAggregation;
-import org.opensearch.search.aggregations.matrix.MatrixAggregationPlugin;
+import org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin;
 import org.opensearch.search.aggregations.matrix.stats.InternalMatrixStats.Fields;
 import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree;
 import org.opensearch.test.InternalAggregationTestCase;
@@ -64,7 +64,7 @@ public class InternalMatrixStatsTests extends InternalAggregationTestCase
 getSearchPlugins() {
-        return Collections.singletonList(new MatrixAggregationPlugin());
+        return Collections.singletonList(new MatrixAggregationModulePlugin());
     }
 }
diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle
index be0acf7218c1e..58ecf79cda0d7 100644
--- a/modules/analysis-common/build.gradle
+++ b/modules/analysis-common/build.gradle
@@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
   description 'Adds "built in" analyzers to OpenSearch.'
- classname 'org.opensearch.analysis.common.CommonAnalysisPlugin' + classname 'org.opensearch.analysis.common.CommonAnalysisModulePlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java index 8c2f83bf83d85..785e597857825 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -48,7 +48,7 @@ public class QueryStringWithAnalyzersIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(CommonAnalysisPlugin.class); + return Arrays.asList(CommonAnalysisModulePlugin.class); } /** diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java similarity index 99% rename from modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java rename to modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index c69917ed52be8..57865e15d523a 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -167,9 +167,9 @@ import static org.opensearch.plugins.AnalysisPlugin.requiresAnalysisSettings; -public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin { +public class CommonAnalysisModulePlugin extends Plugin implements AnalysisPlugin, ScriptPlugin { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisPlugin.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisModulePlugin.class); private final SetOnce scriptService = new SetOnce<>(); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java index 1a4651dc23fff..d107977237b9e 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactoryTests.java @@ -51,7 +51,7 @@ public void testDefault() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("index.analysis.filter.my_ascii_folding.type", "asciifolding") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding"); String source = "Ansprüche"; @@ -68,7 +68,7 @@ public void testPreserveOriginal() throws IOException { .put("index.analysis.filter.my_ascii_folding.type", "asciifolding") .put("index.analysis.filter.my_ascii_folding.preserve_original", true) .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ascii_folding"); String source = "Ansprüche"; diff 
--git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java index 9d54776755766..829ace512b5c8 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java @@ -60,7 +60,7 @@ public void testDefault() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", type) .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; @@ -78,7 +78,7 @@ public void testCatenateWords() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; @@ -96,7 +96,7 @@ public void testCatenateNumbers() throws IOException { .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false") .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; @@ -115,7 +115,7 @@ public void testCatenateAll() throws IOException { .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false") .put("index.analysis.filter.my_word_delimiter.catenate_all", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; @@ -132,7 +132,7 @@ public void testSplitOnCaseChange() throws IOException { .put("index.analysis.filter.my_word_delimiter.type", type) .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; @@ -149,7 +149,7 @@ public void testPreserveOriginal() throws IOException { .put("index.analysis.filter.my_word_delimiter.type", type) .put("index.analysis.filter.my_word_delimiter.preserve_original", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's"; @@ -186,7 +186,7 @@ public void testStemEnglishPossessive() throws IOException { .put("index.analysis.filter.my_word_delimiter.type", type) .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot 500-42 wi-fi wi-fi-4000 
j2se O'Neil's"; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java index f2c0d9859cbe4..2f33194125652 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CJKFilterFactoryTests.java @@ -52,7 +52,7 @@ public class CJKFilterFactoryTests extends OpenSearchTokenStreamTestCase { @Before public void setup() throws IOException { - analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE, new CommonAnalysisPlugin()); + analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE, new CommonAnalysisModulePlugin()); } public void testDefault() throws IOException { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java index 4cf0d1de28717..1c4db089565ff 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java @@ -50,7 +50,7 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase { public CommonAnalysisFactoryTests() { - super(new CommonAnalysisPlugin()); + super(new CommonAnalysisModulePlugin()); } @Override diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java index 04570be7a6f9e..713d61f294630 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonGramsTokenFilterFactoryTests.java @@ -58,7 +58,7 @@ public void testDefault() throws IOException { .build(); try { - AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); Assert.fail("[common_words] or [common_words_path] is set"); } catch (IllegalArgumentException e) {} catch (IOException e) { fail("expected IAE"); @@ -333,7 +333,7 @@ private Path createHome() throws IOException { } private static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(Settings settings) throws IOException { - return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java index e5ce7c818f72b..32556db3939b8 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java @@ -102,8 +102,8 @@ private List analyze(Settings settings, String analyzerName, String text } private AnalysisModule createAnalysisModule(Settings settings) throws IOException { - 
CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin(); - return new AnalysisModule(TestEnvironment.newEnvironment(settings), Arrays.asList(commonAnalysisPlugin, new AnalysisPlugin() { + CommonAnalysisModulePlugin commonAnalysisModulePlugin = new CommonAnalysisModulePlugin(); + return new AnalysisModule(TestEnvironment.newEnvironment(settings), Arrays.asList(commonAnalysisModulePlugin, new AnalysisPlugin() { @Override public Map> getTokenFilters() { return singletonMap("myfilter", MyFilterTokenFilterFactory::new); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java index eaf571e7469d6..1a78690dffcf7 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java @@ -29,7 +29,7 @@ public class ConcatenateGraphTokenFilterFactoryTests extends OpenSearchTokenStre public void testSimpleTokenizerAndConcatenate() throws IOException { OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("concatenate_graph"); @@ -47,7 +47,7 @@ public void testTokenizerCustomizedSeparator() throws IOException { .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") .put("index.analysis.filter.my_concatenate_graph.token_separator", "+") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph"); @@ -65,7 +65,7 @@ public void testTokenizerEmptySeparator() throws IOException { .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") .put("index.analysis.filter.my_concatenate_graph.token_separator", "") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph"); @@ -83,7 +83,7 @@ public void testPreservePositionIncrementsDefault() throws IOException { .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") .put("index.analysis.filter.my_concatenate_graph.token_separator", "+") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph"); @@ -106,7 +106,7 @@ public void testPreservePositionIncrementsTrue() throws IOException { .put("index.analysis.filter.my_concatenate_graph.token_separator", "+") .put("index.analysis.filter.my_concatenate_graph.preserve_position_increments", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_concatenate_graph"); @@ -132,7 +132,7 @@ public void testGraph() throws IOException { .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace") .put("index.analysis.analyzer.my_analyzer.filter", "my_word_delimiter, my_concatenate_graph") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); String source = "PowerShot Is AweSome"; @@ -166,7 +166,7 @@ public void 
testInvalidSeparator() { .put("index.analysis.filter.my_concatenate_graph.type", "concatenate_graph") .put("index.analysis.filter.my_concatenate_graph.token_separator", "11") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ) ); } @@ -187,7 +187,7 @@ public void testMaxGraphExpansion() throws IOException { .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace") .put("index.analysis.analyzer.my_analyzer.filter", "my_word_delimiter, my_concatenate_graph") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); String source = "PowerShot Is AweSome"; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java index 35915af8f263d..9bfc3a77e8c44 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java @@ -74,7 +74,7 @@ public class DisableGraphQueryTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(CommonAnalysisPlugin.class); + return Collections.singleton(CommonAnalysisModulePlugin.class); } @Before diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java index b3724d99f10ed..e62a9c52edc5c 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenFilterFactoryTests.java @@ -52,7 +52,7 @@ public void testDefault() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_edge_ngram"); String source = "foo"; @@ -69,7 +69,7 @@ public void testPreserveOriginal() throws IOException { .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram") .put("index.analysis.filter.my_edge_ngram.preserve_original", true) .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_edge_ngram"); String source = "foo"; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java index e77f895d05661..34fdec4135bfe 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java @@ -60,7 +60,7 @@ private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new CommonAnalysisPlugin())) + return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new 
CommonAnalysisModulePlugin())) .getAnalysisRegistry() .build(idxSettings); } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java index 164068eab5e1f..fc5c9ce49bbc9 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ElisionFilterFactoryTests.java @@ -49,7 +49,7 @@ public void testElisionFilterWithNoArticles() throws IOException { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()) + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()) ); assertEquals("elision filter requires [articles] or [articles_path] setting", e.getMessage()); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java index 57c959a4f0b65..74ed3cd79e753 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -65,7 +65,7 @@ public class HighlighterWithAnalyzersTests extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(CommonAnalysisPlugin.class); + return Arrays.asList(CommonAnalysisModulePlugin.class); } public void testNgramHighlightingWithBrokenPositions() throws IOException { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java index 0b094e52df8a1..41f27cd8b9136 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepFilterFactoryTests.java @@ -54,7 +54,7 @@ public void testLoadWithoutSettings() throws IOException { OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath( createTempDir(), RESOURCE, - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep"); Assert.assertNull(tokenFilter); @@ -68,7 +68,7 @@ public void testLoadOverConfiguredSettings() { .put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]") .build(); try { - AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); Assert.fail("path and array are configured"); } catch (IllegalArgumentException e) {} catch (IOException e) { fail("expected IAE"); @@ -83,7 +83,7 @@ public void testKeepWordsPathSettings() { .build(); try { // test our none existing setup is picked up - AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); fail("expected an exception due to non existent keep_words_path"); } catch (IllegalArgumentException e) {} 
catch (IOException e) { fail("expected IAE"); @@ -92,7 +92,7 @@ public void testKeepWordsPathSettings() { settings = Settings.builder().put(settings).putList("index.analysis.filter.non_broken_keep_filter.keep_words", "test").build(); try { // test our none existing setup is picked up - AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] "); } catch (IllegalArgumentException e) {} catch (IOException e) { fail("expected IAE"); @@ -104,7 +104,7 @@ public void testCaseInsensitiveMapping() throws IOException { OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath( createTempDir(), RESOURCE, - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keep_filter"); assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class)); @@ -119,7 +119,7 @@ public void testCaseSensitiveMapping() throws IOException { OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath( createTempDir(), RESOURCE, - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_case_sensitive_keep_filter"); assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class)); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java index 1f1021b4bfe66..eaab746be26dc 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeepTypesFilterFactoryTests.java @@ -63,7 +63,10 @@ public void testKeepTypesInclude() throws IOException { ); } Settings settings = settingsBuilder.build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep_numbers"); assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class)); String source = "Hello 123 world"; @@ -80,7 +83,10 @@ public void testKeepTypesExclude() throws IOException { .putList(BASE_SETTING + "." + KeepTypesFilterFactory.KEEP_TYPES_KEY, new String[] { "", "" }) .put(BASE_SETTING + "." 
+ KeepTypesFilterFactory.KEEP_TYPES_MODE_KEY, KeepTypesFilterFactory.KeepTypesMode.EXCLUDE) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("keep_numbers"); assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class)); String source = "Hello 123 world"; @@ -99,7 +105,7 @@ public void testKeepTypesException() throws IOException { .build(); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()) + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()) ); assertEquals("`keep_types` tokenfilter mode can only be [include] or [exclude] but was [bad_parameter].", ex.getMessage()); } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java index 40e354785ddbe..f9c5a25444ed0 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/KeywordMarkerFilterFactoryTests.java @@ -65,7 +65,7 @@ public void testKeywordSet() throws IOException { .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keyword"); assertThat(tokenFilter, instanceOf(KeywordMarkerTokenFilterFactory.class)); TokenStream filter = tokenFilter.create(new WhitespaceTokenizer()); @@ -87,7 +87,7 @@ public void testKeywordPattern() throws IOException { .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keyword"); assertThat(tokenFilter, instanceOf(KeywordMarkerTokenFilterFactory.class)); TokenStream filter = tokenFilter.create(new WhitespaceTokenizer()); @@ -112,7 +112,7 @@ public void testCannotSpecifyBothKeywordsAndPattern() throws IOException { .build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()) + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()) ); assertEquals("cannot specify both `keywords_pattern` and `keywords` or `keywords_path`", e.getMessage()); } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java 
b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java index 76471fd98e5fe..99708045b0be2 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/LimitTokenCountFilterFactoryTests.java @@ -119,7 +119,7 @@ public void testSettings() throws IOException { } private static OpenSearchTestCase.TestAnalysis createTestAnalysisFromSettings(Settings settings) throws IOException { - return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java index 390e36c4ca0a0..41f60e1264b5c 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MassiveWordListTests.java @@ -43,7 +43,7 @@ public class MassiveWordListTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(CommonAnalysisPlugin.class); + return Collections.singleton(CommonAnalysisModulePlugin.class); } public void testCreateIndexWithMassiveWordList() { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java index 514c53f17456c..e86a939dc857b 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MinHashFilterFactoryTests.java @@ -50,7 +50,10 @@ public void testDefault() throws IOException { int default_bucket_size = 512; int default_hash_set_size = 1; Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("min_hash"); String source = "the quick brown fox"; Tokenizer tokenizer = new WhitespaceTokenizer(); @@ -70,7 +73,10 @@ public void testSettings() throws IOException { .put("index.analysis.filter.test_min_hash.with_rotation", false) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("test_min_hash"); String source = "sushi"; Tokenizer tokenizer = new WhitespaceTokenizer(); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java 
b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java index 167f61464da1b..e9dfa299871e5 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java @@ -65,7 +65,7 @@ public void testMultiplexingFilter() throws IOException { IndexAnalyzers indexAnalyzers = new AnalysisModule( TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) + Collections.singletonList(new CommonAnalysisModulePlugin()) ).getAnalysisRegistry().build(idxSettings); try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) { @@ -99,7 +99,7 @@ public void testMultiplexingNoOriginal() throws IOException { IndexAnalyzers indexAnalyzers = new AnalysisModule( TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) + Collections.singletonList(new CommonAnalysisModulePlugin()) ).getAnalysisRegistry().build(idxSettings); try (NamedAnalyzer analyzer = indexAnalyzers.get("myAnalyzer")) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java index 85090648096d1..e5f558b1c2fdd 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenFilterFactoryTests.java @@ -52,7 +52,7 @@ public void testDefault() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("index.analysis.filter.my_ngram.type", "ngram") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ngram"); String source = "foo"; @@ -69,7 +69,7 @@ public void testPreserveOriginal() throws IOException { .put("index.analysis.filter.my_ngram.type", "ngram") .put("index.analysis.filter.my_ngram.preserve_original", true) .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ngram"); String source = "foo"; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java index 5cd18a5b01f18..a3dc75fd37671 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java @@ -55,7 +55,7 @@ public void testPatternCaptureTokenFilter() throws Exception { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; + IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisModulePlugin()).indexAnalyzers; NamedAnalyzer analyzer1 = indexAnalyzers.get("single"); assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[] { "foobarbaz", "foobar", "foo" }); diff --git 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java index c16f4f37846ec..b31f4020ef627 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -81,7 +81,7 @@ public FactoryType compile(Script script, ScriptContext FactoryType compile(Script script, ScriptContext { indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; } + () -> { indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisModulePlugin()).indexAnalyzers; } ); } @@ -259,7 +259,7 @@ public void testTokenFiltersBypassSynonymAnalysis() throws IOException { String[] bypassingFactories = new String[] { "dictionary_decompounder" }; - CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); for (String factory : bypassingFactories) { TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings); TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings); @@ -294,7 +294,7 @@ public void testPreconfiguredTokenFilters() throws IOException { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); for (PreConfiguredTokenFilter tf : plugin.getPreConfiguredTokenFilters()) { if (disallowedFilters.contains(tf.getName())) { @@ -319,7 +319,7 @@ public void testDisallowedTokenFilters() throws IOException { .put("output_unigrams", "true") .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + CommonAnalysisModulePlugin plugin = new CommonAnalysisModulePlugin(); String[] disallowedFactories = new String[] { "multiplexer", diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java index 3ea9c526052f2..a5419df92db07 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/TrimTokenFilterTests.java @@ -49,7 +49,10 @@ public void testNormalizer() throws IOException { .putList("index.analysis.normalizer.my_normalizer.filter", "trim") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); assertNull(analysis.indexAnalyzers.get("my_normalizer")); NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer"); assertNotNull(normalizer); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java 
b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index 102182f381128..7a717fe7fe22e 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -64,7 +64,7 @@ public void testMultiTerms() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_all", "true") .put("index.analysis.filter.my_word_delimiter.preserve_original", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); @@ -114,7 +114,7 @@ public void testPartsAndCatenate() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; @@ -146,7 +146,7 @@ public void testAdjustingOffsets() throws IOException { .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") .put("index.analysis.filter.my_word_delimiter.adjust_offsets", "false") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; @@ -181,7 +181,10 @@ public void testIgnoreKeywords() throws IOException { .put("index.analysis.analyzer.my_analyzer.filter", "my_keyword, my_word_delimiter") .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + settings, + new CommonAnalysisModulePlugin() + ); String source = "PowerShot PowerHungry"; int[] expectedStartOffsets = new int[] { 0, 5, 10, 15 }; int[] expectedEndOffsets = new int[] { 5, 9, 15, 21 }; @@ -191,7 +194,7 @@ public void testIgnoreKeywords() throws IOException { // test with keywords but ignore_keywords is set as true settings = Settings.builder().put(settings).put("index.analysis.filter.my_word_delimiter.ignore_keywords", "true").build(); - analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); + analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisModulePlugin()); analyzer = analysis.indexAnalyzers.get("my_analyzer"); expectedStartOffsets = new int[] { 0, 5, 10 }; expectedEndOffsets = new int[] { 5, 9, 21 }; @@ -213,7 +216,7 @@ public void testPreconfiguredFilter() throws IOException { try ( IndexAnalyzers indexAnalyzers = new AnalysisModule( TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()) + Collections.singletonList(new CommonAnalysisModulePlugin()) ).getAnalysisRegistry().build(idxSettings) ) { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java index ea37fd5ce9546..b6e064f72630a 100644 --- 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterTokenFilterFactoryTests.java @@ -58,7 +58,7 @@ public void testPartsAndCatenate() throws IOException { .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") .build(), - new CommonAnalysisPlugin() + new CommonAnalysisModulePlugin() ); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter"); String source = "PowerShot"; diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index d78e83ec7c4c6..1146b6924b2f8 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { description 'Placeholder plugin for geospatial features in OpenSearch. only registers geo_shape field mapper for now' - classname 'org.opensearch.geo.GeoPlugin' + classname 'org.opensearch.geo.GeoModulePlugin' } restResources { diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoPlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java similarity index 95% rename from modules/geo/src/main/java/org/opensearch/geo/GeoPlugin.java rename to modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 9b898da33bb12..ed8c7c06fb3e8 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoPlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -40,7 +40,7 @@ import java.util.Collections; import java.util.Map; -public class GeoPlugin extends Plugin implements MapperPlugin { +public class GeoModulePlugin extends Plugin implements MapperPlugin { @Override public Map getMappers() { diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 34201069d7b7b..7b567eb9110c5 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' - classname 'org.opensearch.ingest.common.IngestCommonPlugin' + classname 'org.opensearch.ingest.common.IngestCommonModulePlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java index f27c903d8795f..784dad8cea49f 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java @@ -63,7 +63,7 @@ public class IngestRestartIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(IngestCommonPlugin.class, CustomScriptPlugin.class); + return Arrays.asList(IngestCommonModulePlugin.class, CustomScriptPlugin.class); } @Override diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java similarity index 98% rename from modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonPlugin.java rename to 
modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index 969f77aa85152..c786785a008d7 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -60,7 +60,7 @@ import java.util.Map; import java.util.function.Supplier; -public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPlugin { +public class IngestCommonModulePlugin extends Plugin implements ActionPlugin, IngestPlugin { static final Setting WATCHDOG_INTERVAL = Setting.timeSetting( "ingest.grok.watchdog.interval", @@ -73,7 +73,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl Setting.Property.NodeScope ); - public IngestCommonPlugin() {} + public IngestCommonModulePlugin() {} @Override public Map getProcessors(Processor.Parameters parameters) { diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index f3be0fe61d4be..7dce788f3a4a4 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -35,7 +35,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database' - classname 'org.opensearch.ingest.geoip.IngestGeoIpPlugin' + classname 'org.opensearch.ingest.geoip.IngestGeoIpModulePlugin' } dependencies { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java index e88c77b8e33f4..65378ca79041c 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java @@ -75,7 +75,7 @@ public List> getSettings() { @Override protected Collection> nodePlugins() { - return Arrays.asList(IngestGeoIpPlugin.class, IngestGeoIpSettingsPlugin.class); + return Arrays.asList(IngestGeoIpModulePlugin.class, IngestGeoIpSettingsPlugin.class); } @Override diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java index 030f75bf48e18..ebffe61f6e756 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java @@ -49,7 +49,7 @@ import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; -import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import java.io.IOException; import java.net.InetAddress; diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java similarity index 99% rename from modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java rename to modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 790a9bb4bf978..7869e4a7de7fc 100644 --- 
a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -64,7 +64,7 @@ import java.util.function.Function; import java.util.stream.Stream; -public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable { +public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable { public static final Setting CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" }; diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java index cda2f5692b0db..d459686162cd0 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -37,7 +37,7 @@ import org.opensearch.common.Randomness; import org.opensearch.index.VersionType; import org.opensearch.ingest.IngestDocument; -import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.StreamsUtils; import org.junit.AfterClass; @@ -73,7 +73,7 @@ public static void loadDatabaseReaders() throws IOException { Files.createDirectories(geoIpConfigDir); copyDatabaseFiles(geoIpDir); - databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir); + databaseReaders = IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir); } @AfterClass @@ -279,7 +279,7 @@ public void testLazyLoading() throws Exception { // Loading another database reader instances, because otherwise we can't test lazy loading as the // database readers used at class level are reused between tests. (we want to keep that otherwise running this // test will take roughly 4 times more time) - Map databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir); + Map databaseReaders = IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); for (DatabaseReaderLazyLoader lazyLoader : databaseReaders.values()) { assertNull(lazyLoader.databaseReader.get()); @@ -336,7 +336,7 @@ public void testLoadingCustomDatabase() throws IOException { * Loading another database reader instances, because otherwise we can't test lazy loading as the database readers used at class * level are reused between tests. (we want to keep that otherwise running this test will take roughly 4 times more time). 
*/ - final Map databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir); + final Map databaseReaders = IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir); final GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(1000)); for (DatabaseReaderLazyLoader lazyLoader : databaseReaders.values()) { assertNull(lazyLoader.databaseReader.get()); @@ -365,9 +365,9 @@ public void testDatabaseNotExistsInDir() throws IOException { Files.createDirectories(geoIpConfigDir); } copyDatabaseFiles(geoIpDir); - final String databaseFilename = randomFrom(IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES); + final String databaseFilename = randomFrom(IngestGeoIpModulePlugin.DEFAULT_DATABASE_FILENAMES); Files.delete(geoIpDir.resolve(databaseFilename)); - final IOException e = expectThrows(IOException.class, () -> IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir)); + final IOException e = expectThrows(IOException.class, () -> IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir)); assertThat(e, hasToString(containsString("expected database [" + databaseFilename + "] to exist in [" + geoIpDir + "]"))); } @@ -377,9 +377,9 @@ public void testDatabaseExistsInConfigDir() throws IOException { final Path geoIpConfigDir = configDir.resolve("ingest-geoip"); Files.createDirectories(geoIpConfigDir); copyDatabaseFiles(geoIpDir); - final String databaseFilename = randomFrom(IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES); + final String databaseFilename = randomFrom(IngestGeoIpModulePlugin.DEFAULT_DATABASE_FILENAMES); copyDatabaseFile(geoIpConfigDir, databaseFilename); - final IOException e = expectThrows(IOException.class, () -> IngestGeoIpPlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir)); + final IOException e = expectThrows(IOException.class, () -> IngestGeoIpModulePlugin.loadDatabaseReaders(geoIpDir, geoIpConfigDir)); assertThat(e, hasToString(containsString("expected database [" + databaseFilename + "] to not exist in [" + geoIpConfigDir + "]"))); } @@ -388,7 +388,7 @@ private static void copyDatabaseFile(final Path path, final String databaseFilen } private static void copyDatabaseFiles(final Path path) throws IOException { - for (final String databaseFilename : IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES) { + for (final String databaseFilename : IngestGeoIpModulePlugin.DEFAULT_DATABASE_FILENAMES) { copyDatabaseFile(path, databaseFilename); } } diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java index 34c80fec520aa..8b94e8cc114ed 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java @@ -37,7 +37,7 @@ import org.opensearch.common.io.PathUtils; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.RandomDocumentPicks; -import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java similarity index 95% rename from 
modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java rename to modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index 540d68b0982eb..e79354b3d7cd0 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpPluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -34,12 +34,12 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; -import org.opensearch.ingest.geoip.IngestGeoIpPlugin.GeoIpCache; +import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; import static org.mockito.Mockito.mock; -public class IngestGeoIpPluginTests extends OpenSearchTestCase { +public class IngestGeoIpModulePluginTests extends OpenSearchTestCase { public void testCachesAndEvictsResults() { GeoIpCache cache = new GeoIpCache(1); diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index a3752ad1c7f7e..187e72d192a3d 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { description 'Ingest processor that extracts information from a user agent' - classname 'org.opensearch.ingest.useragent.IngestUserAgentPlugin' + classname 'org.opensearch.ingest.useragent.IngestUserAgentModulePlugin' } restResources { diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java similarity index 96% rename from modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java rename to modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index dc005ae36dff8..cd96fcf2347ef 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentPlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -49,7 +49,7 @@ import java.util.Map; import java.util.stream.Stream; -public class IngestUserAgentPlugin extends Plugin implements IngestPlugin { +public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { private final Setting CACHE_SIZE_SETTING = Setting.longSetting( "ingest.user_agent.cache_size", @@ -85,7 +85,7 @@ static Map createUserAgentParsers(Path userAgentConfigD UserAgentParser defaultParser = new UserAgentParser( DEFAULT_PARSER_NAME, - IngestUserAgentPlugin.class.getResourceAsStream("/regexes.yml"), + IngestUserAgentModulePlugin.class.getResourceAsStream("/regexes.yml"), cache ); userAgentParsers.put(DEFAULT_PARSER_NAME, defaultParser); diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java index 0625f1f8fd1af..df88e98e7fc4f 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentProcessor.java @@ -313,7 +313,13 @@ public UserAgentProcessor create( ) throws Exception { String field = readStringProperty(TYPE, processorTag, config, "field"); 
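// Editorial aside: the create(...) factory here resolves every processor option
// through ConfigurationUtils-style readers that consume the config map and fall
// back to a default when the key is absent (as "regex_file" does just below).
// A minimal self-contained sketch of that lookup pattern; readString is an
// illustrative stand-in, not the OpenSearch helper itself:

import java.util.HashMap;
import java.util.Map;

class ConfigReaderSketch {
    // Removes the property from the config map, as the real readers do, so any
    // leftover keys can later be reported as unknown options.
    static String readString(Map<String, Object> config, String name, String defaultValue) {
        Object value = config.remove(name);
        return value != null ? value.toString() : defaultValue;
    }

    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put("field", "agent");
        System.out.println(readString(config, "field", null));             // agent
        System.out.println(readString(config, "regex_file", "_default_")); // falls back to the default
    }
}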
String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "user_agent"); - String regexFilename = readStringProperty(TYPE, processorTag, config, "regex_file", IngestUserAgentPlugin.DEFAULT_PARSER_NAME); + String regexFilename = readStringProperty( + TYPE, + processorTag, + config, + "regex_file", + IngestUserAgentModulePlugin.DEFAULT_PARSER_NAME + ); List propertyNames = readOptionalList(TYPE, processorTag, config, "properties"); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); boolean useECS = readBooleanProperty(TYPE, processorTag, config, "ecs", true); diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java index 72815a37f46de..576136436ef33 100644 --- a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorFactoryTests.java @@ -91,7 +91,7 @@ public static void createUserAgentParsers() throws IOException { } } - userAgentParsers = IngestUserAgentPlugin.createUserAgentParsers(userAgentConfigDir, new UserAgentCache(1000)); + userAgentParsers = IngestUserAgentModulePlugin.createUserAgentParsers(userAgentConfigDir, new UserAgentCache(1000)); } public void testBuildDefaults() throws Exception { diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 203c332069c5f..114c750e34b17 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Lucene expressions integration for OpenSearch' - classname 'org.opensearch.script.expression.ExpressionPlugin' + classname 'org.opensearch.script.expression.ExpressionModulePlugin' } dependencies { diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index 952b00dda608c..69d5e639e4aec 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -78,7 +78,7 @@ public class MoreExpressionIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(ExpressionPlugin.class); + return Collections.singleton(ExpressionModulePlugin.class); } private SearchRequestBuilder buildRequest(String script, Object... 
params) { diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index 5aade265439d2..cf14b47fa9f3e 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -59,7 +59,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Collections.singleton(ExpressionPlugin.class); + return Collections.singleton(ExpressionModulePlugin.class); } public void testAllOpsDisabledIndexedScripts() throws IOException { diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionPlugin.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java similarity index 95% rename from modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionPlugin.java rename to modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java index 440a03c63bd98..7640fcf7a3bc5 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionPlugin.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java @@ -40,7 +40,7 @@ import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptEngine; -public class ExpressionPlugin extends Plugin implements ScriptPlugin { +public class ExpressionModulePlugin extends Plugin implements ScriptPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 511a6b144c21a..14eafd8d43e13 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Mustache scripting integration for OpenSearch' - classname 'org.opensearch.script.mustache.MustachePlugin' + classname 'org.opensearch.script.mustache.MustacheModulePlugin' hasClientJar = true // For the template apis and query } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java index 617f1f4f738a0..fbb8ebdec384c 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java @@ -58,7 +58,7 @@ public class MultiSearchTemplateIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(MustachePlugin.class); + return Collections.singleton(MustacheModulePlugin.class); } public void testBasic() throws Exception { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java index 61f047a32f1c1..87ef8b810f5e0 100644 --- 
a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java @@ -62,7 +62,7 @@ public class SearchTemplateIT extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(MustachePlugin.class); + return Collections.singleton(MustacheModulePlugin.class); } @Before diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustachePlugin.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java similarity index 96% rename from modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustachePlugin.java rename to modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java index 498dc2ac57cc7..03be9d7efb2db 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustachePlugin.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java @@ -54,7 +54,7 @@ import java.util.List; import java.util.function.Supplier; -public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin, SearchPlugin { +public class MustacheModulePlugin extends Plugin implements ScriptPlugin, ActionPlugin, SearchPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index c997068ccb6c9..f29c48aecf5df 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -37,7 +37,7 @@ apply plugin: 'com.github.johnrengelman.shadow' opensearchplugin { description 'An easy, safe and fast scripting language for OpenSearch' - classname 'org.opensearch.painless.PainlessPlugin' + classname 'org.opensearch.painless.PainlessModulePlugin' } testClusters.all { diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java similarity index 98% rename from modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java rename to modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java index 9a235df0fb184..4a432b8f17b25 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java @@ -81,7 +81,7 @@ /** * Registers Painless as a plugin. 
*/ -public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { +public final class PainlessModulePlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { private static final Map, List> allowlists; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java index 424d88cbc9e00..6e318eda91985 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java @@ -36,7 +36,7 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; import org.opensearch.index.query.MatchQueryBuilder; -import org.opensearch.painless.PainlessPlugin; +import org.opensearch.painless.PainlessModulePlugin; import org.opensearch.painless.action.PainlessExecuteAction.Request; import org.opensearch.painless.action.PainlessExecuteAction.Response; import org.opensearch.plugins.Plugin; @@ -60,7 +60,7 @@ public class PainlessExecuteApiTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(PainlessPlugin.class); + return Collections.singleton(PainlessModulePlugin.class); } public void testDefaults() throws IOException { diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index 08758c7ab2bda..b16176ca5aa72 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.java-rest-test' opensearchplugin { description 'Adds advanced field mappers' - classname 'org.opensearch.index.mapper.MapperExtrasPlugin' + classname 'org.opensearch.index.mapper.MapperExtrasModulePlugin' hasClientJar = true } diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasModulePlugin.java similarity index 96% rename from modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasPlugin.java rename to modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasModulePlugin.java index ebb86bc055825..081a3414c3675 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/MapperExtrasModulePlugin.java @@ -43,7 +43,7 @@ import java.util.List; import java.util.Map; -public class MapperExtrasPlugin extends Plugin implements MapperPlugin, SearchPlugin { +public class MapperExtrasModulePlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map getMappers() { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java index d9e40fac1ad0f..d498116efc108 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java @@ -47,7 +47,7 @@ public class BWCTemplateTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); + return pluginList(MapperExtrasModulePlugin.class); } public void 
testBeatsTemplatesBWC() throws Exception { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java index 908e5db6196c3..1e001eb2f55a5 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java @@ -72,7 +72,7 @@ protected void assertExistsQuery(MappedFieldType fieldType, Query query, ParseCo @Override protected Collection getPlugins() { - return List.of(new MapperExtrasPlugin()); + return List.of(new MapperExtrasModulePlugin()); } static int getFrequency(TokenStream tk) throws IOException { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java index 3161e7462d2a0..63487fd7baa89 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java @@ -57,7 +57,7 @@ public void setup() { @Override protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); + return pluginList(MapperExtrasModulePlugin.class); } public void testBasics() throws Exception { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java index 55d825d1b53bb..33e780c622a82 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -58,7 +58,7 @@ protected void assertExistsQuery(MapperService mapperService) { @Override protected Collection getPlugins() { - return org.opensearch.common.collect.List.of(new MapperExtrasPlugin()); + return org.opensearch.common.collect.List.of(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java index e19f9dd7988e1..74c79b0db469b 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -53,7 +53,7 @@ public class ScaledFloatFieldMapperTests extends MapperTestCase { @Override protected Collection getPlugins() { - return singletonList(new MapperExtrasPlugin()); + return singletonList(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index 7c4b8956d9e3c..6ab2293620893 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -133,7 +133,7 @@ protected void writeFieldValue(XContentBuilder builder) throws IOException { @Override 
protected Collection getPlugins() { - return org.opensearch.common.collect.List.of(new MapperExtrasPlugin()); + return org.opensearch.common.collect.List.of(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java index e9d3767373b95..1ebec6b963b53 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java @@ -62,7 +62,7 @@ public class TokenCountFieldMapperTests extends MapperTestCase { @Override protected Collection getPlugins() { - return Collections.singletonList(new MapperExtrasPlugin()); + return Collections.singletonList(new MapperExtrasModulePlugin()); } @Override diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java index e183ba6f6735c..f57aac8a244b7 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.index.mapper.MapperExtrasPlugin; +import org.opensearch.index.mapper.MapperExtrasModulePlugin; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.RankFeatureQueryBuilder.ScoreFunction; import org.opensearch.plugins.Plugin; @@ -78,7 +78,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws @Override protected Collection> getPlugins() { - return Arrays.asList(MapperExtrasPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(MapperExtrasModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/opensearch-dashboards/build.gradle b/modules/opensearch-dashboards/build.gradle index f76ca739faf81..07453e1f70f1c 100644 --- a/modules/opensearch-dashboards/build.gradle +++ b/modules/opensearch-dashboards/build.gradle @@ -31,7 +31,7 @@ apply plugin: 'opensearch.java-rest-test' opensearchplugin { description 'Plugin exposing APIs for OpenSearch Dashboards system indices' - classname 'org.opensearch.dashboards.OpenSearchDashboardsPlugin' + classname 'org.opensearch.dashboards.OpenSearchDashboardsModulePlugin' } dependencies { diff --git a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsPlugin.java b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java similarity index 98% rename from modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsPlugin.java rename to modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java index 534218ef438e7..09fd52ff65c66 100644 --- a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsPlugin.java +++ b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java @@ -75,7 +75,7 @@ import static java.util.Collections.unmodifiableList; 
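// Editorial aside: the SystemIndexPlugin renamed just below publishes its system
// index patterns through a node-scoped list setting, and the renamed test further
// down asserts which index names those patterns do and do not match. A
// self-contained sketch of the exact-name-or-trailing-wildcard matching those
// assertions rely on; SimpleDescriptorSketch is illustrative, not the
// SystemIndexDescriptor API:

import java.util.List;

class SimpleDescriptorSketch {
    // A pattern matches either an exact index name or a "prefix*" wildcard.
    static boolean matches(String pattern, String index) {
        return pattern.endsWith("*")
            ? index.startsWith(pattern.substring(0, pattern.length() - 1))
            : index.equals(pattern);
    }

    public static void main(String[] args) {
        List<String> patterns = List.of(".opensearch_dashboards", ".opensearch_dashboards_*");
        // true: matched by the ".opensearch_dashboards_*" wildcard
        System.out.println(patterns.stream().anyMatch(p -> matches(p, ".opensearch_dashboards_1")));
        // false: the "-event-log-" name misses both patterns, as the test below expects
        System.out.println(patterns.stream().anyMatch(p -> matches(p, ".opensearch_dashboards-event-log-7-1")));
    }
}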
-public class OpenSearchDashboardsPlugin extends Plugin implements SystemIndexPlugin {
+public class OpenSearchDashboardsModulePlugin extends Plugin implements SystemIndexPlugin {
 
     public static final Setting<List<String>> OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING = Setting.listSetting(
         "opensearch_dashboards.system_indices",
diff --git a/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsPluginTests.java b/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsModulePluginTests.java
similarity index 79%
rename from modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsPluginTests.java
rename to modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsModulePluginTests.java
index a74af8cd257b5..1573113d58ecd 100644
--- a/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsPluginTests.java
+++ b/modules/opensearch-dashboards/src/test/java/org/opensearch/dashboards/OpenSearchDashboardsModulePluginTests.java
@@ -44,28 +44,28 @@ import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.is;
 
-public class OpenSearchDashboardsPluginTests extends OpenSearchTestCase {
+public class OpenSearchDashboardsModulePluginTests extends OpenSearchTestCase {
 
     public void testOpenSearchDashboardsIndexNames() {
         assertThat(
-            new OpenSearchDashboardsPlugin().getSettings(),
-            contains(OpenSearchDashboardsPlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING)
+            new OpenSearchDashboardsModulePlugin().getSettings(),
+            contains(OpenSearchDashboardsModulePlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING)
         );
         assertThat(
-            new OpenSearchDashboardsPlugin().getSystemIndexDescriptors(Settings.EMPTY)
+            new OpenSearchDashboardsModulePlugin().getSystemIndexDescriptors(Settings.EMPTY)
                 .stream()
                 .map(SystemIndexDescriptor::getIndexPattern)
                 .collect(Collectors.toList()),
             contains(".opensearch_dashboards", ".opensearch_dashboards_*", ".reporting-*", ".apm-agent-configuration", ".apm-custom-link")
         );
         final List<String> names = Collections.unmodifiableList(Arrays.asList("." + randomAlphaOfLength(4), "." + randomAlphaOfLength(5)));
-        final List<String> namesFromDescriptors = new OpenSearchDashboardsPlugin().getSystemIndexDescriptors(
-            Settings.builder().putList(OpenSearchDashboardsPlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING.getKey(), names).build()
+        final List<String> namesFromDescriptors = new OpenSearchDashboardsModulePlugin().getSystemIndexDescriptors(
+            Settings.builder().putList(OpenSearchDashboardsModulePlugin.OPENSEARCH_DASHBOARDS_INDEX_NAMES_SETTING.getKey(), names).build()
         ).stream().map(SystemIndexDescriptor::getIndexPattern).collect(Collectors.toList());
         assertThat(namesFromDescriptors, is(names));
         assertThat(
-            new OpenSearchDashboardsPlugin().getSystemIndexDescriptors(Settings.EMPTY)
+            new OpenSearchDashboardsModulePlugin().getSystemIndexDescriptors(Settings.EMPTY)
                 .stream()
                 .anyMatch(systemIndexDescriptor -> systemIndexDescriptor.matchesIndexPattern(".opensearch_dashboards-event-log-7-1")),
             is(false)
diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle
index 3c71a731e6a6a..d509e65106e7b 100644
--- a/modules/parent-join/build.gradle
+++ b/modules/parent-join/build.gradle
@@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test'
 
 opensearchplugin {
   description 'This module adds the support parent-child queries and aggregations'
-  classname 'org.opensearch.join.ParentJoinPlugin'
+  classname 'org.opensearch.join.ParentJoinModulePlugin'
   hasClientJar = true
 }
diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java
index 5d6d4fb333d49..71d7c0928e643 100644
--- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java
+++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java
@@ -38,7 +38,7 @@ import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.IndexModule;
-import org.opensearch.join.ParentJoinPlugin;
+import org.opensearch.join.ParentJoinModulePlugin;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.InternalSettingsPlugin;
@@ -60,7 +60,7 @@ protected boolean ignoreExternalCluster() {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class);
+        return Arrays.asList(InternalSettingsPlugin.class, ParentJoinModulePlugin.class);
     }
 
     @Override
diff --git a/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinPlugin.java b/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinModulePlugin.java
similarity index 95%
rename from modules/parent-join/src/main/java/org/opensearch/join/ParentJoinPlugin.java
rename to modules/parent-join/src/main/java/org/opensearch/join/ParentJoinModulePlugin.java
index 6889e38c04ff3..31e5be7f9979c 100644
--- a/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinPlugin.java
+++ b/modules/parent-join/src/main/java/org/opensearch/join/ParentJoinModulePlugin.java
@@ -50,9 +50,9 @@ import java.util.List;
 import java.util.Map;
 
-public class ParentJoinPlugin extends Plugin implements SearchPlugin, MapperPlugin {
+public class ParentJoinModulePlugin extends Plugin implements SearchPlugin, MapperPlugin {
 
-    public ParentJoinPlugin() {}
+    public ParentJoinModulePlugin() {}
 
     @Override
     public List<QuerySpec<?>> getQueries() {
diff --git
a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java index 71088e1d5391d..086176bfbde6c 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenTests.java @@ -32,7 +32,7 @@ package org.opensearch.join.aggregations; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; @@ -44,7 +44,7 @@ public class ChildrenTests extends BaseAggregationTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java index 0d134592fa678..d1b4f3c3ebc27 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -61,7 +61,7 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.shard.ShardId; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.mapper.MetaJoinFieldMapper; import org.opensearch.join.mapper.ParentJoinFieldMapper; import org.opensearch.plugins.SearchPlugin; @@ -350,6 +350,6 @@ private void testCaseTermsParentTerms(Query query, IndexSearcher indexSearcher, @Override protected List getSearchPlugins() { - return Collections.singletonList(new ParentJoinPlugin()); + return Collections.singletonList(new ParentJoinModulePlugin()); } } diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java index 8eaedc4aa15b0..3884c856d9a0e 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalChildrenTests.java @@ -35,7 +35,7 @@ import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.NamedXContentRegistry.Entry; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.InternalAggregations; @@ -50,7 +50,7 @@ public class InternalChildrenTests extends InternalSingleBucketAggregationTestCa @Override protected SearchPlugin registerPlugin() { - return new ParentJoinPlugin(); + return new ParentJoinModulePlugin(); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java index cd5236ab49a39..72bfc55888147 100644 --- 
a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/InternalParentTests.java @@ -34,7 +34,7 @@ import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry.Entry; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.InternalAggregations; @@ -49,7 +49,7 @@ public class InternalParentTests extends InternalSingleBucketAggregationTestCase @Override protected SearchPlugin registerPlugin() { - return new ParentJoinPlugin(); + return new ParentJoinModulePlugin(); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java index abb258f37198b..4f08e004ea208 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java @@ -35,7 +35,7 @@ import java.util.Arrays; import java.util.Collection; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; @@ -44,7 +44,7 @@ public class ParentTests extends BaseAggregationTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java index 2ed06ee0c0ea9..61a1b761f17ab 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java @@ -62,7 +62,7 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.shard.ShardId; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.mapper.MetaJoinFieldMapper; import org.opensearch.join.mapper.ParentJoinFieldMapper; import org.opensearch.plugins.SearchPlugin; @@ -287,6 +287,6 @@ private void testCase(Query query, IndexSearcher indexSearcher, Consumer getSearchPlugins() { - return Collections.singletonList(new ParentJoinPlugin()); + return Collections.singletonList(new ParentJoinModulePlugin()); } } diff --git a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java index a9ac151dd3806..53e9495b707fe 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java @@ -44,7 +44,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParsedDocument; import 
org.opensearch.index.mapper.SourceToParse; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -56,7 +56,7 @@ public class ParentJoinFieldMapperTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singletonList(ParentJoinPlugin.class); + return Collections.singletonList(ParentJoinModulePlugin.class); } public void testSingleLevel() throws Exception { diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java index 5595c98a439bf..5a7b51b64210c 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java @@ -63,7 +63,7 @@ import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; import org.opensearch.index.similarity.SimilarityService; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; @@ -99,7 +99,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java index 0f983799a6d25..32c9e490be165 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java @@ -50,7 +50,7 @@ import org.opensearch.index.query.QueryShardException; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; @@ -83,7 +83,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java index 41bc717db1fc8..995206120d075 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java @@ -45,7 +45,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import 
org.opensearch.plugins.Plugin; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; @@ -72,7 +72,7 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(ParentJoinPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(ParentJoinModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 1738de5a55748..2312f7bda80b2 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Percolator module adds capability to index queries and query these queries by specifying documents' - classname 'org.opensearch.percolator.PercolatorPlugin' + classname 'org.opensearch.percolator.PercolatorModulePlugin' hasClientJar = true } diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index 8d3c37bc9b039..8c0bfb378bccd 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -44,7 +44,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.geo.GeoPlugin; +import org.opensearch.geo.GeoModulePlugin; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; @@ -93,7 +93,7 @@ protected boolean addMockGeoShapeFieldMapper() { @Override protected Collection> nodePlugins() { - return Arrays.asList(PercolatorPlugin.class, GeoPlugin.class); + return Arrays.asList(PercolatorModulePlugin.class, GeoModulePlugin.class); } public void testPercolatorQuery() throws Exception { diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorModulePlugin.java similarity index 96% rename from modules/percolator/src/main/java/org/opensearch/percolator/PercolatorPlugin.java rename to modules/percolator/src/main/java/org/opensearch/percolator/PercolatorModulePlugin.java index f841860d3930a..c0416f0336a10 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorModulePlugin.java @@ -46,7 +46,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -public class PercolatorPlugin extends Plugin implements MapperPlugin, SearchPlugin { +public class PercolatorModulePlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public List> getQueries() { return singletonList(new QuerySpec<>(PercolateQueryBuilder.NAME, PercolateQueryBuilder::new, PercolateQueryBuilder::fromXContent)); diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java index e59aa227e3dc7..1e884f591cbf8 100644 --- 
a/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java @@ -149,7 +149,7 @@ public class CandidateQueryTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Collections.singleton(PercolatorPlugin.class); + return Collections.singleton(PercolatorModulePlugin.class); } @Before diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java index 87aa28a3346bc..8a744a038442f 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java @@ -97,7 +97,7 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { - return Arrays.asList(PercolatorPlugin.class, TestGeoShapeFieldMapperPlugin.class); + return Arrays.asList(PercolatorModulePlugin.class, TestGeoShapeFieldMapperPlugin.class); } @Override diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java index fe9c486b68166..fc18a7af937f9 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java @@ -95,7 +95,7 @@ import org.opensearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.opensearch.index.query.functionscore.ScriptScoreFunctionBuilder; import org.opensearch.indices.TermsLookup; -import org.opensearch.join.ParentJoinPlugin; +import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.query.HasChildQueryBuilder; import org.opensearch.join.query.HasParentQueryBuilder; import org.opensearch.plugins.Plugin; @@ -145,7 +145,12 @@ public class PercolatorFieldMapperTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class, PercolatorPlugin.class, FoolMeScriptPlugin.class, ParentJoinPlugin.class); + return pluginList( + InternalSettingsPlugin.class, + PercolatorModulePlugin.class, + FoolMeScriptPlugin.class, + ParentJoinModulePlugin.class + ); } @Override diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java index 1d77c9d472864..3d44a94297790 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java @@ -78,7 +78,7 @@ public class PercolatorQuerySearchTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Arrays.asList(PercolatorPlugin.class, CustomScriptPlugin.class); + return Arrays.asList(PercolatorModulePlugin.class, CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 2b1d1e9abc4b4..4232d583dc984 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -32,7 +32,7 @@ apply plugin: 'opensearch.internal-cluster-test' 
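Editorial aside: PercolatorModulePlugin.getQueries() above registers the percolate query as a QuerySpec, that is, a name paired with a stream reader and an XContent parser, both passed as method references. A minimal self-contained sketch of that name-keyed parser registry; QueryRegistrySketch and its types are illustrative, not the SearchPlugin API:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class QueryRegistrySketch {
    interface Query { String describe(); }

    // Each query name maps to a parser, mirroring QuerySpec(name, reader, parser).
    private final Map<String, Function<String, Query>> parsers = new HashMap<>();

    void register(String name, Function<String, Query> parser) {
        parsers.put(name, parser);
    }

    Query parse(String name, String source) {
        Function<String, Query> parser = parsers.get(name);
        if (parser == null) {
            throw new IllegalArgumentException("unknown query [" + name + "]");
        }
        return parser.apply(source);
    }

    public static void main(String[] args) {
        QueryRegistrySketch registry = new QueryRegistrySketch();
        // Registered much like new QuerySpec<>(name, Reader::new, Parser::fromXContent)
        registry.register("percolate", source -> () -> "percolate query on field [" + source + "]");
        System.out.println(registry.parse("percolate", "doc_field").describe());
    }
}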
opensearchplugin { description 'The Rank Eval module adds APIs to evaluate ranking quality.' - classname 'org.opensearch.index.rankeval.RankEvalPlugin' + classname 'org.opensearch.index.rankeval.RankEvalModulePlugin' hasClientJar = true } diff --git a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index ea80b59711b8a..6eb974c77a5f3 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -65,7 +65,7 @@ public class RankEvalRequestIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(RankEvalPlugin.class); + return Arrays.asList(RankEvalModulePlugin.class); } @Before diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalPlugin.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java similarity index 98% rename from modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalPlugin.java rename to modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java index a1eaa0f62f0f1..af960207e5175 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalPlugin.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java @@ -53,7 +53,7 @@ import java.util.List; import java.util.function.Supplier; -public class RankEvalPlugin extends Plugin implements ActionPlugin { +public class RankEvalModulePlugin extends Plugin implements ActionPlugin { @Override public List> getActions() { diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java index 56d23d8ca3184..ab34435c5f346 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java @@ -56,12 +56,14 @@ public class EvalQueryQualityTests extends OpenSearchTestCase { - private static NamedWriteableRegistry namedWritableRegistry = new NamedWriteableRegistry(new RankEvalPlugin().getNamedWriteables()); + private static NamedWriteableRegistry namedWritableRegistry = new NamedWriteableRegistry( + new RankEvalModulePlugin().getNamedWriteables() + ); @SuppressWarnings("resource") @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(new RankEvalPlugin().getNamedXContent()); + return new NamedXContentRegistry(new RankEvalModulePlugin().getNamedXContent()); } public static EvalQueryQuality randomEvalQueryQuality() { diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java index 401bb1b2a7bb7..c2a041ea59aee 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java @@ -47,21 +47,21 @@ public class RankEvalRequestTests extends AbstractWireSerializingTestCase { - private static RankEvalPlugin rankEvalPlugin = new RankEvalPlugin(); + private static 
RankEvalModulePlugin rankEvalModulePlugin = new RankEvalModulePlugin(); @AfterClass public static void releasePluginResources() throws IOException { - rankEvalPlugin.close(); + rankEvalModulePlugin.close(); } @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(rankEvalPlugin.getNamedXContent()); + return new NamedXContentRegistry(rankEvalModulePlugin.getNamedXContent()); } @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(rankEvalPlugin.getNamedWriteables()); + return new NamedWriteableRegistry(rankEvalModulePlugin.getNamedWriteables()); } @Override diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java index b79b26eb0af2e..31db13889be4b 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java @@ -71,7 +71,7 @@ public class RankEvalSpecTests extends OpenSearchTestCase { @SuppressWarnings("resource") @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(new RankEvalPlugin().getNamedXContent()); + return new NamedXContentRegistry(new RankEvalModulePlugin().getNamedXContent()); } private static List randomList(Supplier randomSupplier) { diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 37526a924da73..cad7d67f3ef84 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -41,7 +41,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' 
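Editorial aside: the rank-eval tests above build NamedWriteableRegistry and NamedXContentRegistry instances directly from the plugin's getNamedWriteables() and getNamedXContent() lists. A self-contained sketch of the name-to-reader lookup such registries perform, including the duplicate check; NamedRegistrySketch is illustrative, not the OpenSearch class:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class NamedRegistrySketch {
    private final Map<String, Supplier<Object>> readers = new HashMap<>();

    // Rejects duplicate registrations, as the real registries do.
    void register(String name, Supplier<Object> reader) {
        if (readers.putIfAbsent(name, reader) != null) {
            throw new IllegalArgumentException("reader for [" + name + "] already registered");
        }
    }

    Object read(String name) {
        Supplier<Object> reader = readers.get(name);
        if (reader == null) {
            throw new IllegalArgumentException("unknown named object [" + name + "]");
        }
        return reader.get();
    }

    public static void main(String[] args) {
        NamedRegistrySketch registry = new NamedRegistrySketch();
        registry.register("precision", () -> "precision@k metric");
        System.out.println(registry.read("precision"));
    }
}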
- classname 'org.opensearch.index.reindex.ReindexPlugin' + classname 'org.opensearch.index.reindex.ReindexModulePlugin' hasClientJar = true } diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java index 6d313e06263b3..05cb87b3f9165 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java @@ -46,7 +46,7 @@ import org.opensearch.index.reindex.DeleteByQueryAction; import org.opensearch.index.reindex.DeleteByQueryRequestBuilder; import org.opensearch.index.reindex.ReindexAction; -import org.opensearch.index.reindex.ReindexPlugin; +import org.opensearch.index.reindex.ReindexModulePlugin; import org.opensearch.index.reindex.ReindexRequestBuilder; import org.opensearch.index.reindex.RethrottleAction; import org.opensearch.index.reindex.RethrottleRequestBuilder; @@ -88,7 +88,7 @@ protected boolean ignoreExternalCluster() { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); + return Arrays.asList(ReindexModulePlugin.class, ReindexCancellationPlugin.class); } @Before diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java similarity index 96% rename from modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexPlugin.java rename to modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java index 865ae26f6f54d..28c6b4038ecfd 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.index.reindex.spi.RemoteReindexExtension; import org.opensearch.plugins.ExtensiblePlugin; -import org.opensearch.plugins.ExtensiblePlugin.ExtensionLoader; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionResponse; @@ -62,7 +61,6 @@ import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.watcher.ResourceWatcherService; import java.util.ArrayList; import java.util.Arrays; @@ -73,9 +71,9 @@ import static java.util.Collections.singletonList; -public class ReindexPlugin extends Plugin implements ActionPlugin, ExtensiblePlugin { +public class ReindexModulePlugin extends Plugin implements ActionPlugin, ExtensiblePlugin { public static final String NAME = "reindex"; - private static final Logger logger = LogManager.getLogger(ReindexPlugin.class); + private static final Logger logger = LogManager.getLogger(ReindexModulePlugin.class); @Override public List> getActions() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java index ce252de292d63..22d560b19c699 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java +++ 
b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java @@ -7,10 +7,11 @@ import org.opensearch.action.ActionListener; import org.opensearch.index.reindex.BulkByScrollResponse; +import org.opensearch.index.reindex.ReindexModulePlugin; import org.opensearch.index.reindex.ReindexRequest; /** - * This interface provides an extension point for {@link org.opensearch.index.reindex.ReindexPlugin}. + * This interface provides an extension point for {@link ReindexModulePlugin}. * This interface can be implemented to provide a custom Rest interceptor and {@link ActionListener} * The Rest interceptor can be used to pre-process any reindex request and perform any action * on the response. The ActionListener listens to the success and failure events on every reindex request diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java index 8ce850a936557..dd7eb977bbe48 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -65,7 +65,7 @@ import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.watcher.ResourceWatcherService; import org.junit.Before; @@ -87,7 +87,7 @@ public class ReindexFromRemoteWithAuthTests extends OpenSearchSingleNodeTestCase @Override protected Collection> getPlugins() { - return Arrays.asList(Netty4Plugin.class, ReindexFromRemoteWithAuthTests.TestPlugin.class, ReindexPlugin.class); + return Arrays.asList(Netty4ModulePlugin.class, ReindexFromRemoteWithAuthTests.TestPlugin.class, ReindexModulePlugin.class); } @Override @@ -100,7 +100,7 @@ protected Settings nodeSettings() { Settings.Builder settings = Settings.builder().put(super.nodeSettings()); // Allowlist reindexing from the http host we're going to use settings.put(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST.getKey(), "127.0.0.1:*"); - settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); + settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME); return settings.build(); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java index 00747d85221c6..be4bacce9b57c 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java @@ -24,7 +24,7 @@ * The test can be removed along with removing support of the deprecated setting. */ public class ReindexRenamedSettingTests extends OpenSearchTestCase { - private final ReindexPlugin plugin = new ReindexPlugin(); + private final ReindexModulePlugin plugin = new ReindexModulePlugin(); /** * Validate the both settings are known and supported. 
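Editorial aside: ReindexRenamedSettingTests above verifies that a renamed setting keeps its deprecated key working. A minimal sketch of that fallback-key lookup; read is illustrative, not the OpenSearch Setting API, and while "reindex.remote.allowlist" appears in this patch, the deprecated key name below is an assumption:

import java.util.Map;

class FallbackSettingSketch {
    // Prefer the new key; fall back to the deprecated key when only it is set.
    static String read(Map<String, String> settings, String newKey, String oldKey, String fallback) {
        if (settings.containsKey(newKey)) return settings.get(newKey);
        if (settings.containsKey(oldKey)) return settings.get(oldKey); // a real impl would emit a deprecation warning
        return fallback;
    }

    public static void main(String[] args) {
        // "reindex.remote.whitelist" as the old key is assumed for illustration.
        Map<String, String> settings = Map.of("reindex.remote.whitelist", "127.0.0.1:*");
        System.out.println(read(settings, "reindex.remote.allowlist", "reindex.remote.whitelist", ""));
    }
}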
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java index 8ce9cf74bb8be..8c113973f4415 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSingleNodeTests.java @@ -46,7 +46,7 @@ public class ReindexSingleNodeTests extends OpenSearchSingleNodeTestCase { @Override protected Collection> getPlugins() { - return Arrays.asList(ReindexPlugin.class); + return Arrays.asList(ReindexModulePlugin.class); } public void testDeprecatedSort() { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java index 0ce3a9f3461c3..0941516194f6e 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexTestCase.java @@ -51,7 +51,7 @@ public abstract class ReindexTestCase extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class); + return Arrays.asList(ReindexModulePlugin.class); } protected ReindexRequestBuilder reindex() { diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java index 0b052f8fd57a3..0a01843405d9f 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java @@ -49,7 +49,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.junit.After; import java.util.ArrayList; @@ -84,7 +84,7 @@ public void forceUnblockAllExecutors() { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, Netty4Plugin.class); + return Arrays.asList(ReindexModulePlugin.class, Netty4ModulePlugin.class); } /** diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 702f0e9bb0f8b..7a697623eb8d9 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -38,7 +38,7 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { description 'Module for URL repository' - classname 'org.opensearch.plugin.repository.url.URLRepositoryPlugin' + classname 'org.opensearch.plugin.repository.url.URLRepositoryModulePlugin' } restResources { diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java index aa274549f3a9b..1bf461d67862b 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java @@ -39,7 +39,7 @@ import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.plugin.repository.url.URLRepositoryPlugin; +import 
org.opensearch.plugin.repository.url.URLRepositoryModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotState; @@ -59,7 +59,7 @@ public class URLSnapshotRestoreIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(URLRepositoryPlugin.class); + return Collections.singletonList(URLRepositoryModulePlugin.class); } public void testUrlRepository() throws Exception { diff --git a/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryPlugin.java b/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryModulePlugin.java similarity index 96% rename from modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryPlugin.java rename to modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryModulePlugin.java index d1b08d443805f..00d697e9fb0f6 100644 --- a/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryPlugin.java +++ b/modules/repository-url/src/main/java/org/opensearch/plugin/repository/url/URLRepositoryModulePlugin.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Map; -public class URLRepositoryPlugin extends Plugin implements RepositoryPlugin { +public class URLRepositoryModulePlugin extends Plugin implements RepositoryPlugin { @Override public List> getSettings() { diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 26e094a9eeae1..726092ffe4273 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -30,5 +30,5 @@ opensearchplugin { description 'Integrates OpenSearch with systemd' - classname 'org.opensearch.systemd.SystemdPlugin' + classname 'org.opensearch.systemd.SystemdModulePlugin' } diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java similarity index 96% rename from modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java rename to modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java index 8abdbbb74ae7f..a22b3f862e017 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java @@ -55,9 +55,9 @@ import java.util.Collections; import java.util.function.Supplier; -public class SystemdPlugin extends Plugin implements ClusterPlugin { +public class SystemdModulePlugin extends Plugin implements ClusterPlugin { - private static final Logger logger = LogManager.getLogger(SystemdPlugin.class); + private static final Logger logger = LogManager.getLogger(SystemdModulePlugin.class); private final boolean enabled; @@ -66,11 +66,11 @@ final boolean isEnabled() { } @SuppressWarnings("unused") - public SystemdPlugin() { + public SystemdModulePlugin() { this(System.getenv("OPENSEARCH_SD_NOTIFY")); } - SystemdPlugin(final String esSDNotify) { + SystemdModulePlugin(final String esSDNotify) { logger.trace("OPENSEARCH_SD_NOTIFY is set to [{}]", esSDNotify); if (esSDNotify == null) { enabled = false; diff --git a/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/opensearch/systemd/SystemdModulePluginTests.java similarity index 87% rename from modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java 
rename to modules/systemd/src/test/java/org/opensearch/systemd/SystemdModulePluginTests.java index 63d97a7486f58..532d47cd009e0 100644 --- a/modules/systemd/src/test/java/org/opensearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/opensearch/systemd/SystemdModulePluginTests.java @@ -56,7 +56,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class SystemdPluginTests extends OpenSearchTestCase { +public class SystemdModulePluginTests extends OpenSearchTestCase { final Scheduler.Cancellable extender = mock(Scheduler.Cancellable.class); final ThreadPool threadPool = mock(ThreadPool.class); @@ -67,14 +67,14 @@ public class SystemdPluginTests extends OpenSearchTestCase { } public void testIsImplicitlyNotEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(null); + final SystemdModulePlugin plugin = new SystemdModulePlugin(null); plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); } public void testIsExplicitlyNotEnabled() { - final SystemdPlugin plugin = new SystemdPlugin(Boolean.FALSE.toString()); + final SystemdModulePlugin plugin = new SystemdModulePlugin(Boolean.FALSE.toString()); plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); @@ -85,7 +85,7 @@ public void testInvalid() { s -> Boolean.TRUE.toString().equals(s) || Boolean.FALSE.toString().equals(s), () -> randomAlphaOfLength(4) ); - final RuntimeException e = expectThrows(RuntimeException.class, () -> new SystemdPlugin(esSDNotify)); + final RuntimeException e = expectThrows(RuntimeException.class, () -> new SystemdModulePlugin(esSDNotify)); assertThat(e, hasToString(containsString("OPENSEARCH_SD_NOTIFY set to unexpected value [" + esSDNotify + "]"))); } @@ -113,9 +113,9 @@ public void testOnNodeStartedNotEnabled() { private void runTestOnNodeStarted( final String esSDNotify, final int rc, - final BiConsumer, SystemdPlugin> assertions + final BiConsumer, SystemdModulePlugin> assertions ) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::onNodeStarted, "READY=1"); + runTest(esSDNotify, rc, assertions, SystemdModulePlugin::onNodeStarted, "READY=1"); } public void testCloseSuccess() { @@ -138,21 +138,25 @@ public void testCloseNotEnabled() { runTestClose(Boolean.FALSE.toString(), randomInt(), (maybe, plugin) -> assertThat(maybe, OptionalMatchers.isEmpty())); } - private void runTestClose(final String esSDNotify, final int rc, final BiConsumer, SystemdPlugin> assertions) { - runTest(esSDNotify, rc, assertions, SystemdPlugin::close, "STOPPING=1"); + private void runTestClose( + final String esSDNotify, + final int rc, + final BiConsumer, SystemdModulePlugin> assertions + ) { + runTest(esSDNotify, rc, assertions, SystemdModulePlugin::close, "STOPPING=1"); } private void runTest( final String esSDNotify, final int rc, - final BiConsumer, SystemdPlugin> assertions, - final CheckedConsumer invocation, + final BiConsumer, SystemdModulePlugin> assertions, + final CheckedConsumer invocation, final String expectedState ) { final AtomicBoolean invoked = new AtomicBoolean(); final AtomicInteger invokedUnsetEnvironment = new AtomicInteger(); final AtomicReference invokedState = new AtomicReference<>(); - final SystemdPlugin plugin = new SystemdPlugin(esSDNotify) { + final SystemdModulePlugin plugin = new SystemdModulePlugin(esSDNotify) { @Override int 
sd_notify(final int unset_environment, final String state) { diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 450eaed14fa46..b72cb6d868d79 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -49,7 +49,7 @@ apply plugin: 'opensearch.publish' */ opensearchplugin { description 'Netty 4 based transport implementation' - classname 'org.opensearch.transport.Netty4Plugin' + classname 'org.opensearch.transport.Netty4ModulePlugin' hasClientJar = true } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java index 9915b9fe51be2..9b086e0b8baa9 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchNetty4IntegTestCase.java @@ -35,7 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.netty4.Netty4Transport; import java.util.Collection; @@ -60,13 +60,13 @@ protected Settings nodeSettings(int nodeOrdinal) { if (randomBoolean()) { builder.put(Netty4Transport.WORKER_COUNT.getKey(), random().nextInt(3) + 1); } - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME); - builder.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME); return builder.build(); } @Override protected Collection> nodePlugins() { - return Collections.singletonList(Netty4Plugin.class); + return Collections.singletonList(Netty4ModulePlugin.class); } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java index a572a181a46fd..e6604abf126da 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java @@ -41,7 +41,7 @@ import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.transport.TransportAddress; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.TransportInfo; import java.net.Inet4Address; @@ -60,7 +60,7 @@ public class Netty4TransportPublishAddressIT extends OpenSearchNetty4IntegTestCa protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME) .build(); } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java 
b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java similarity index 98% rename from modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java rename to modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index 73cfe4e46fbda..8a18210fd963e 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -57,7 +57,7 @@ import java.util.Map; import java.util.function.Supplier; -public class Netty4Plugin extends Plugin implements NetworkPlugin { +public class Netty4ModulePlugin extends Plugin implements NetworkPlugin { public static final String NETTY_TRANSPORT_NAME = "netty4"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java index 951c2df53ba59..08974b902c418 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpSmokeTestCase.java @@ -35,7 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.Netty4Plugin; +import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.nio.MockNioTransportPlugin; import org.opensearch.transport.nio.NioTransportPlugin; import org.junit.BeforeClass; @@ -52,9 +52,9 @@ public abstract class HttpSmokeTestCase extends OpenSearchIntegTestCase { @SuppressWarnings("unchecked") @BeforeClass public static void setUpTransport() { - nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); - nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4Plugin.class, NioTransportPlugin.class)); - clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); + nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); + nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4ModulePlugin.class, NioTransportPlugin.class)); + clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4ModulePlugin.class, NioTransportPlugin.class)); } private static String getTypeKey(Class clazz) { @@ -63,8 +63,8 @@ private static String getTypeKey(Class clazz) { } else if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_TRANSPORT_NAME; } else { - assert clazz.equals(Netty4Plugin.class); - return Netty4Plugin.NETTY_TRANSPORT_NAME; + assert clazz.equals(Netty4ModulePlugin.class); + return Netty4ModulePlugin.NETTY_TRANSPORT_NAME; } } @@ -72,8 +72,8 @@ private static String getHttpTypeKey(Class clazz) { if (clazz.equals(NioTransportPlugin.class)) { return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME; } else { - assert clazz.equals(Netty4Plugin.class); - return Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME; + assert clazz.equals(Netty4ModulePlugin.class); + return Netty4ModulePlugin.NETTY_HTTP_TRANSPORT_NAME; } } @@ -92,7 +92,7 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); + return Arrays.asList(getTestTransportPlugin(), 
Netty4ModulePlugin.class, NioTransportPlugin.class); } @Override From d1ec4eab77f4c22ea4e5c8f2672cd7fb2a762d5d Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 2 Aug 2022 12:53:02 -0400 Subject: [PATCH 056/104] OpenJDK Update (July 2022 Patch releases) (#4023) Signed-off-by: Andriy Redko --- .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++-- buildSrc/version.properties | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index c947a457d33ec..b14e93ecfd22d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -75,9 +75,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "11.0.15+10"; + private static final String SYSTEM_JDK_VERSION = "11.0.16+8"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.3+7"; + private static final String GRADLE_JDK_VERSION = "17.0.4+8"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fc751d8461e92..8e9911294977a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -2,7 +2,7 @@ opensearch = 3.0.0 lucene = 9.3.0 bundled_jdk_vendor = adoptium -bundled_jdk = 17.0.3+7 +bundled_jdk = 17.0.4+8 From 66c24ffc0eb7bf2525504fd3f69c9f11d2dac73a Mon Sep 17 00:00:00 2001 From: Kartik Ganesh Date: Tue, 2 Aug 2022 14:08:58 -0700 Subject: [PATCH 057/104] Adding 2.3.0 to BWC versions (#4099) Signed-off-by: Kartik Ganesh --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 347b88ecc3e4d..67976d58188e2 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -48,3 +48,4 @@ BWC_VERSION: - "2.1.0" - "2.1.1" - "2.2.0" + - "2.3.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index d4c2920c8a452..57f49f9c54591 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -95,6 +95,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; From 6f43dbcba89b55f770d66d80c18f205ebbcdb124 Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Wed, 3 Aug 2022 11:42:07 +0530 Subject: [PATCH 058/104] List all PITs service layer changes (#4016) * List all pits service layer changes Signed-off-by: Bharathwaj G --- .../org/opensearch/action/ActionModule.java | 3 + 
.../action/search/GetAllPitNodeRequest.java | 35 +++++ .../action/search/GetAllPitNodeResponse.java | 69 +++++++++ .../action/search/GetAllPitNodesRequest.java | 37 +++++ .../action/search/GetAllPitNodesResponse.java | 80 ++++++++++ .../action/search/GetAllPitsAction.java | 23 +++ .../opensearch/action/search/ListPitInfo.java | 63 ++++++++ .../opensearch/action/search/PitService.java | 54 ++++++- .../action/search/SearchTransportService.java | 18 --- .../search/TransportDeletePitAction.java | 30 ++-- .../search/TransportGetAllPitsAction.java | 86 +++++++++++ .../org/opensearch/search/SearchService.java | 36 +++-- .../search/internal/PitReaderContext.java | 2 +- .../search/internal/ReaderContext.java | 4 + .../search/CreatePitControllerTests.java | 8 +- .../action/search/PitTestsUtil.java | 47 ++++++ .../search/TransportDeletePitActionTests.java | 111 +++++++++---- .../search/CreatePitSingleNodeTests.java | 15 +- .../search/DeletePitMultiNodeTests.java | 14 +- ...iNodeTests.java => PitMultiNodeTests.java} | 146 +++++++++++++++++- .../opensearch/search/SearchServiceTests.java | 19 +-- 21 files changed, 793 insertions(+), 107 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/ListPitInfo.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java rename server/src/test/java/org/opensearch/search/{CreatePitMultiNodeTests.java => PitMultiNodeTests.java} (63%) diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 71c900beb5319..2f470d7603869 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -234,12 +234,14 @@ import org.opensearch.action.search.ClearScrollAction; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportDeletePitAction; +import org.opensearch.action.search.TransportGetAllPitsAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -663,6 +665,7 @@ public void reg // point in time actions actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); + actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); return unmodifiableMap(actions.getRegistry()); diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java 
b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java new file mode 100644 index 0000000000000..c90f75e3c0aed --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java @@ -0,0 +1,35 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Inner node get all pits request + */ +public class GetAllPitNodeRequest extends BaseNodeRequest { + + public GetAllPitNodeRequest() { + super(); + } + + public GetAllPitNodeRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java new file mode 100644 index 0000000000000..ba308a1a6ea1e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java @@ -0,0 +1,69 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * Inner node get all pits response + */ +public class GetAllPitNodeResponse extends BaseNodeResponse implements ToXContentFragment { + + /** + * List of active PITs in the associated node + */ + private final List pitInfos; + + public GetAllPitNodeResponse(DiscoveryNode node, List pitInfos) { + super(node); + if (pitInfos == null) { + throw new IllegalArgumentException("Pits info cannot be null"); + } + this.pitInfos = Collections.unmodifiableList(pitInfos); + } + + public GetAllPitNodeResponse(StreamInput in) throws IOException { + super(in); + this.pitInfos = Collections.unmodifiableList(in.readList(ListPitInfo::new)); + } + + public List getPitInfos() { + return pitInfos; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(pitInfos); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("node", this.getNode().getName()); + builder.startArray("pitInfos"); + for (ListPitInfo pit : pitInfos) { + pit.toXContent(builder, params); + } + + builder.endArray(); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java new file mode 100644 index 0000000000000..b4ad2f6641087 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request to get all active PIT IDs from all nodes of cluster + */ +public class GetAllPitNodesRequest extends BaseNodesRequest { + + @Inject + public GetAllPitNodesRequest(DiscoveryNode... concreteNodes) { + super(concreteNodes); + } + + public GetAllPitNodesRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java new file mode 100644 index 0000000000000..4a454e7145eff --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This class transforms active PIT objects from all nodes to unique PIT objects + */ +public class GetAllPitNodesResponse extends BaseNodesResponse implements ToXContentObject { + + /** + * List of unique PITs across all nodes + */ + private final Set pitInfos = new HashSet<>(); + + public GetAllPitNodesResponse(StreamInput in) throws IOException { + super(in); + } + + public GetAllPitNodesResponse( + ClusterName clusterName, + List getAllPitNodeResponse, + List failures + ) { + super(clusterName, getAllPitNodeResponse, failures); + Set uniquePitIds = new HashSet<>(); + pitInfos.addAll( + getAllPitNodeResponse.stream() + .flatMap(p -> p.getPitInfos().stream().filter(t -> uniquePitIds.add(t.getPitId()))) + .collect(Collectors.toList()) + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("pitInfos"); + for (ListPitInfo pit : pitInfos) { + pit.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + @Override + public List readNodesFrom(StreamInput in) throws IOException { + return in.readList(GetAllPitNodeResponse::new); + } + + @Override + public void writeNodesTo(StreamOutput out, List nodes) throws 
IOException { + out.writeList(nodes); + } + + public List getPitInfos() { + return Collections.unmodifiableList(new ArrayList<>(pitInfos)); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java new file mode 100644 index 0000000000000..16e65cb785a7d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for listing all PIT reader contexts + */ +public class GetAllPitsAction extends ActionType { + public static final GetAllPitsAction INSTANCE = new GetAllPitsAction(); + public static final String NAME = "indices:data/read/point_in_time/readall"; + + private GetAllPitsAction() { + super(NAME, GetAllPitNodesResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java new file mode 100644 index 0000000000000..4499e7d6e8ef5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * This holds information about pit reader context such as pit id and creation time + */ +public class ListPitInfo implements ToXContentFragment, Writeable { + private final String pitId; + private final long creationTime; + private final long keepAlive; + + public ListPitInfo(String pitId, long creationTime, long keepAlive) { + this.pitId = pitId; + this.creationTime = creationTime; + this.keepAlive = keepAlive; + } + + public ListPitInfo(StreamInput in) throws IOException { + this.pitId = in.readString(); + this.creationTime = in.readLong(); + this.keepAlive = in.readLong(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("pitId", pitId); + builder.field("creationTime", creationTime); + builder.field("keepAlive", keepAlive); + builder.endObject(); + return builder; + } + + public String getPitId() { + return pitId; + } + + public long getCreationTime() { + return creationTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + out.writeLong(creationTime); + out.writeLong(keepAlive); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index 4c5f1a1c0fc4f..0b79b77fd6014 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; 
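At this point the caller-facing surface is complete: an action type, node-level request/response wrappers, and ListPitInfo records; the PitService diff that follows wires the fan-out into the node. A minimal usage sketch from the caller's side, assuming a Client handle `client` and the cluster's data nodes in `dataNodes` (both hypothetical names; the test utility later in this patch builds them from a ClusterStateResponse):

    // Hypothetical caller-side fragment: `client` is an org.opensearch.client.Client
    // and `dataNodes` is a java.util.List<DiscoveryNode> of the cluster's data nodes.
    GetAllPitNodesRequest request = new GetAllPitNodesRequest(dataNodes.toArray(new DiscoveryNode[0]));
    GetAllPitNodesResponse response = client.execute(GetAllPitsAction.INSTANCE, request).actionGet();
    for (ListPitInfo pit : response.getPitInfos()) {
        System.out.println(pit.getPitId() + " created at " + pit.getCreationTime());
    }

Note that GetAllPitNodesResponse deduplicates PIT IDs across nodes, so each PIT appears exactly once in getPitInfos().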
+import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -18,10 +19,17 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -38,11 +46,13 @@ public class PitService { private final ClusterService clusterService; private final SearchTransportService searchTransportService; + private final TransportService transportService; @Inject - public PitService(ClusterService clusterService, SearchTransportService searchTransportService) { + public PitService(ClusterService clusterService, SearchTransportService searchTransportService, TransportService transportService) { this.clusterService = clusterService; this.searchTransportService = searchTransportService; + this.transportService = transportService; } /** @@ -52,6 +62,9 @@ public void deletePitContexts( Map> nodeToContextsMap, ActionListener listener ) { + if (nodeToContextsMap.size() == 0) { + listener.onResponse(new DeletePitResponse(Collections.emptyList())); + } final Set clusters = nodeToContextsMap.values() .stream() .flatMap(Collection::stream) @@ -130,4 +143,43 @@ public void onFailure(final Exception e) { } }, size); } + + /** + * Get all active point in time contexts + */ + public void getAllPits(ActionListener getAllPitsListener) { + final List nodes = new ArrayList<>(); + for (ObjectCursor cursor : clusterService.state().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); + transportService.sendRequest( + transportService.getLocalNode(), + GetAllPitsAction.NAME, + new GetAllPitNodesRequest(disNodesArr), + new TransportResponseHandler() { + + @Override + public void handleResponse(GetAllPitNodesResponse response) { + getAllPitsListener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + getAllPitsListener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetAllPitNodesResponse read(StreamInput in) throws IOException { + return new GetAllPitNodesResponse(in); + } + } + ); + } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index 23515dd28b329..241b3de72a258 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -218,16 +218,6 @@ public void sendFreePITContexts( ); } - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { - transportService.sendRequest( - connection, - FREE_ALL_PIT_CONTEXTS_ACTION_NAME, - TransportRequest.Empty.INSTANCE, - TransportRequestOptions.EMPTY, - new 
ActionListenerResponseHandler<>(listener, DeletePitResponse::new) ); } - public void sendExecuteDfs( Transport.Connection connection, final ShardSearchRequest request, @@ -528,14 +518,6 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, DeletePitResponse::new); - transportService.registerRequestHandler( - FREE_ALL_PIT_CONTEXTS_ACTION_NAME, - ThreadPool.Names.SAME, - TransportRequest.Empty::new, - (request, channel, task) -> { channel.sendResponse(searchService.freeAllPitContexts()); } - ); - TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, DeletePitResponse::new); - transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index d67979d1c87c5..f9e36c479dd54 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -11,18 +11,17 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; -import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * Transport action for deleting point in time searches - supports deleting list and all point in time searches @@ -83,19 +82,22 @@ private void deletePits(ActionListener listener, DeletePitReq } /** - * Delete all active PIT reader contexts + * Delete all active PIT reader contexts leveraging list all PITs + * + * For cross-cluster PITs: + * - mixed-cluster PITs (a PIT spanning local and remote clusters) will be fully deleted. Since there will be at least + * one reader context with the PIT ID present in the local cluster, 'Get all PITs' will retrieve the PIT ID with which + * we can completely delete the PIT contexts in both the local and remote clusters. + * - fully remote PITs will not be deleted, as 'Get all PITs' operates on the local cluster only and no PIT info can + * be retrieved when it's fully remote. */ private void deleteAllPits(ActionListener<DeletePitResponse> listener) { - // TODO: Use list all PITs to delete all PITs in case of remote cluster use case - int size = clusterService.state().getNodes().getSize(); - ActionListener groupedActionListener = pitService.getDeletePitGroupedListener(listener, size); - for (final DiscoveryNode node : clusterService.state().getNodes()) { - try { - Transport.Connection connection = searchTransportService.getConnection(null, node); - searchTransportService.sendFreeAllPitContexts(connection, groupedActionListener); - } catch (Exception e) { - groupedActionListener.onFailure(e); - } - } + // Get all PITs and execute the delete operation for them.
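The body of deleteAllPits, next, composes the new listing with the existing delete path via ActionListener.wrap, which feeds a success into the given lambda and routes any failure straight to the supplied handler. A minimal standalone sketch of that same composition (log output is illustrative; `logger` and `pitService` are assumed in scope, as they are in this class):

    // Hypothetical fragment: list all PITs and log the outcome, failing soft.
    ActionListener<GetAllPitNodesResponse> wrapped = ActionListener.wrap(
        response -> logger.info("found {} active PITs", response.getPitInfos().size()),
        e -> logger.warn("listing PITs failed", e)
    );
    pitService.getAllPits(wrapped);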
+ pitService.getAllPits(ActionListener.wrap(getAllPitNodesResponse -> { + DeletePitRequest deletePitRequest = new DeletePitRequest( + getAllPitNodesResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()) + ); + deletePits(listener, deletePitRequest); + }, listener::onFailure)); } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java new file mode 100644 index 0000000000000..21a64e388fa7b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + GetAllPitsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List getAllPitNodeRespons, + List failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeRespons, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs in the node + */ + @Override + protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 747b13b031824..4bd95da193668 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -42,7 +42,11 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.DeletePitInfo; 
import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.ListPitInfo; import org.opensearch.action.search.PitSearchContextIdForNode; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; @@ -142,6 +146,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -946,6 +951,21 @@ public PitReaderContext getPitReaderContext(ShardSearchContextId id) { return null; } + /** + * This method returns all active PIT reader contexts + */ + public List<ListPitInfo> getAllPITReaderContexts() { + final List<ListPitInfo> pitContextsInfo = new ArrayList<>(); + for (ReaderContext ctx : activeReaders.values()) { + if (ctx instanceof PitReaderContext) { + final PitReaderContext context = (PitReaderContext) ctx; + ListPitInfo pitInfo = new ListPitInfo(context.getPitId(), context.getCreationTime(), context.getKeepAlive()); + pitContextsInfo.add(pitInfo); + } + } + return pitContextsInfo; + } + final SearchContext createContext( ReaderContext readerContext, ShardSearchRequest request, @@ -1102,22 +1122,6 @@ public DeletePitResponse freeReaderContextsIfFound(List<PitSearchContextIdForNode> contextIds) { - /** - * Free all active PIT reader contexts - */ - public DeletePitResponse freeAllPitContexts() { - List<DeletePitInfo> deleteResults = new ArrayList<>(); - for (ReaderContext readerContext : activeReaders.values()) { - if (readerContext instanceof PitReaderContext) { - boolean result = freeReaderContext(readerContext.id()); - DeletePitInfo deletePitInfo = new DeletePitInfo(result, ((PitReaderContext) readerContext).getPitId()); - deleteResults.add(deletePitInfo); - } - } - return new DeletePitResponse(deleteResults); - } - private long getKeepAlive(ShardSearchRequest request) { if (request.scroll() != null) { return getScrollKeepAlive(request.scroll()); diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java index 1b2400bdadfa1..98e84136a8847 100644 --- a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java @@ -61,7 +61,7 @@ public Releasable updatePitIdAndKeepAlive(long keepAliveInMillis, String pitId, } public long getCreationTime() { - return this.creationTime.get(); + return this.creationTime.get() == null ?
0 : this.creationTime.get(); } public void setCreationTime(final long creationTime) { diff --git a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java index fe644e44f297f..898aa2e2c6745 100644 --- a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java @@ -117,6 +117,10 @@ protected long nowInMillis() { return indexShard.getThreadPool().relativeTimeInMillis(); } + public long getKeepAlive() { + return keepAlive.get(); + } + @Override public final void close() { if (closed.compareAndSet(false, true)) { diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 2c65d6ffcc813..a5c6e1c12b79c 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -219,7 +219,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -308,7 +308,7 @@ public void sendFreePITContexts( CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -406,7 +406,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, @@ -494,7 +494,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod }; CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); CreatePitController controller = new CreatePitController( searchTransportService, clusterServiceMock, diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java index ec83cb45697d9..433cd9dfa3e89 100644 --- a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java +++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java @@ -8,7 +8,14 @@ package org.opensearch.action.search; +import 
com.carrotsearch.hppc.cursors.ObjectCursor; +import org.junit.Assert; import org.opensearch.Version; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -21,7 +28,10 @@ import org.opensearch.search.internal.ShardSearchContextId; import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import static org.opensearch.test.OpenSearchTestCase.between; import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; @@ -81,4 +91,41 @@ public static String getPitId() { } return SearchContextId.encode(array.asList(), aliasFilters, version); } + + public static void assertUsingGetAllPits(Client client, String id, long creationTime) throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + Assert.assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id)); + Assert.assertEquals(getPitResponse.getPitInfos().get(0).getCreationTime(), creationTime); + } + + public static void assertGetAllPitsEmpty(Client client) throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + Assert.assertEquals(0, getPitResponse.getPitInfos().size()); + } } diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index e2db4fdbc97ef..7a1d9a6fe963c 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -7,13 +7,13 @@ */ package org.opensearch.action.search; -import org.apache.lucene.search.TotalHits; import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -25,10 +25,6 @@ import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.SearchHit; -import org.opensearch.search.SearchHits; -import org.opensearch.search.aggregations.InternalAggregations; -import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; @@ -109,15 +105,6 @@ public void setupData() { new TaskId(randomLong() + ":" + randomLong()), Collections.emptyMap() ); - InternalSearchResponse response = new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), - InternalAggregations.EMPTY, - null, - null, - false, - null, - 1 - ); clusterServiceMock = mock(ClusterService.class); ClusterState state = mock(ClusterState.class); @@ -178,7 +165,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -224,7 +211,11 @@ public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionExce transportService.start(); transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); List deletePitInfos = new ArrayList<>(); @@ -238,7 +229,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } 
+ }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -307,7 +312,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -366,7 +371,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -434,7 +439,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService); TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -480,7 +485,11 @@ public void testDeleteAllPitWhenNodeIsDown() { transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); @@ -496,7 +505,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -543,7 +566,11 @@ public void testDeleteAllPitWhenAllNodesAreDown() { SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); Thread t = new Thread(() -> listener.onFailure(new 
Exception("node down"))); t.start(); @@ -554,7 +581,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, @@ -600,7 +641,11 @@ public void testDeleteAllPitFailure() { transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextId, + final ActionListener listener + ) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 is down"))); @@ -616,7 +661,21 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod return new SearchAsyncActionTests.MockConnection(node); } }; - PitService pitService = new PitService(clusterServiceMock, searchTransportService); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; TransportDeletePitAction action = new TransportDeletePitAction( transportService, actionFilters, diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index 5c3c43af9cb66..b28423c3a8657 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -14,6 +14,7 @@ import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Priority; @@ -63,6 +64,7 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti request.setIndices(new String[] { "index" }); ActionFuture execute = client().execute(CreatePitAction.INSTANCE, 
request); CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index") .setSize(2) @@ -86,6 +88,7 @@ public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse response = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), response.getId(), response.getCreationTime()); assertEquals(4, response.getSuccessfulShards()); assertEquals(4, service.getActiveContexts()); service.doClose(); @@ -99,7 +102,7 @@ public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, I request.setIndices(new String[] { "index" }); ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); - + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index") .setSize(2) @@ -128,7 +131,7 @@ public void testCreatePITWithNonExistentIndex() { service.doClose(); } - public void testCreatePITOnCloseIndex() { + public void testCreatePITOnCloseIndex() throws ExecutionException, InterruptedException { createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); @@ -144,6 +147,7 @@ public void testCreatePITOnCloseIndex() { SearchService service = getInstanceFromNode(SearchService.class); assertEquals(0, service.getActiveContexts()); + PitTestsUtil.assertGetAllPitsEmpty(client()); service.doClose(); } @@ -165,6 +169,7 @@ public void testPitSearchOnDeletedIndex() throws ExecutionException, Interrupted }); assertTrue(ex.getMessage().contains("no such index [index]")); SearchService service = getInstanceFromNode(SearchService.class); + PitTestsUtil.assertGetAllPitsEmpty(client()); assertEquals(0, service.getActiveContexts()); service.doClose(); } @@ -190,6 +195,7 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx request.setIndices(new String[] { "index" }); ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); SearchService service = getInstanceFromNode(SearchService.class); assertEquals(2, service.getActiveContexts()); client().admin().indices().prepareClose("index").get(); @@ -201,6 +207,7 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx }); assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); assertEquals(0, service.getActiveContexts()); + PitTestsUtil.assertGetAllPitsEmpty(client()); // PIT reader contexts are lost after close, verifying it with open index api client().admin().indices().prepareOpen("index").get(); @@ -314,6 +321,7 @@ public void testPitAfterUpdateIndex() throws Exception { request.setIndices(new String[] { "test" }); ActionFuture 
execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); SearchService service = getInstanceFromNode(SearchService.class); assertThat( @@ -456,6 +464,7 @@ public void testPitAfterUpdateIndex() throws Exception { } finally { service.doClose(); assertEquals(0, service.getActiveContexts()); + PitTestsUtil.assertGetAllPitsEmpty(client()); } } @@ -467,6 +476,7 @@ public void testConcurrentSearches() throws Exception { request.setIndices(new String[] { "index" }); ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); Thread[] threads = new Thread[5]; CountDownLatch latch = new CountDownLatch(threads.length); @@ -497,5 +507,6 @@ public void testConcurrentSearches() throws Exception { assertEquals(2, service.getActiveContexts()); service.doClose(); assertEquals(0, service.getActiveContexts()); + PitTestsUtil.assertGetAllPitsEmpty(client()); } } diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java index 89607b9201cd9..70efcf88cd581 100644 --- a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -181,7 +181,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { DeletePitResponse deletePITResponse = execute.get(); for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { assertTrue(pitIds.contains(deletePitInfo.getPitId())); - assertFalse(deletePitInfo.isSuccessful()); } } catch (Exception e) { throw new AssertionError(e); @@ -205,9 +204,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { } public void testDeleteAllPitsWhileNodeDrop() throws Exception { - createPitOnIndex("index"); createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + createPitOnIndex("index1"); ensureGreen(); DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @@ -218,7 +217,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { DeletePitResponse deletePITResponse = execute.get(); for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); - assertFalse(deletePitInfo.isSuccessful()); } } catch (Exception e) { assertTrue(e.getMessage().contains("Node not connected")); @@ -226,18 +224,14 @@ public Settings onNodeStopped(String nodeName) throws Exception { return super.onNodeStopped(nodeName); } }); - ensureGreen(); /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + * When we invoke delete again, returns success as all readers are cleared. 
(Delete all on node which is Up and + * once the node restarts, all active contexts are cleared in the node ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { - assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); - assertTrue(deletePitInfo.isSuccessful()); - } + assertEquals(0, deletePITResponse.getDeletePitResults().size()); client().admin().indices().prepareDelete("index1").get(); } diff --git a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java similarity index 63% rename from server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java rename to server/src/test/java/org/opensearch/search/PitMultiNodeTests.java index 27d8f27add898..f93a43a027da7 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -8,18 +8,27 @@ package org.opensearch.search; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.opensearch.action.ActionFuture; import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.GetAllPitNodesRequest; +import org.opensearch.action.search.GetAllPitNodesResponse; +import org.opensearch.action.search.GetAllPitsAction; +import org.opensearch.action.search.PitTestsUtil; import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.builder.PointInTimeBuilder; @@ -30,12 +39,14 @@ import java.util.ArrayList; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -45,7 +56,7 @@ * Multi node integration tests for PIT creation and search operation with PIT ID. 
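 * (The tests below run against a fixed two-data-node, suite-scoped cluster; see the
 * {@code @ClusterScope} annotation on the class.)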
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class CreatePitMultiNodeTests extends OpenSearchIntegTestCase { +public class PitMultiNodeTests extends OpenSearchIntegTestCase { @Before public void setupIndex() throws ExecutionException, InterruptedException { @@ -70,6 +81,7 @@ public void testPit() throws Exception { .get(); assertEquals(2, searchResponse.getSuccessfulShards()); assertEquals(2, searchResponse.getTotalShards()); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); } public void testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception { @@ -95,6 +107,7 @@ public void testCreatePitWhileNodeDropWithAllowPartialCreationTrue() throws Exce public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); assertEquals(1, pitResponse.getSuccessfulShards()); assertEquals(2, pitResponse.getTotalShards()); SearchResponse searchResponse = client().prepareSearch("index") @@ -124,6 +137,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertEquals(1, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getSkippedShards()); assertEquals(2, searchResponse.getTotalShards()); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); return super.onNodeStopped(nodeName); } }); @@ -264,7 +278,7 @@ public void testConcurrentCreatesWithDeletes() throws InterruptedException, Exec AtomicInteger numSuccess = new AtomicInteger(); TestThreadPool testThreadPool = null; try { - testThreadPool = new TestThreadPool(CreatePitMultiNodeTests.class.getName()); + testThreadPool = new TestThreadPool(PitMultiNodeTests.class.getName()); int concurrentRuns = randomIntBetween(20, 50); List operationThreads = new ArrayList<>(); @@ -312,4 +326,132 @@ public void onFailure(Exception e) {} ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); } } + + public void testGetAllPits() throws Exception { + client().admin().indices().prepareCreate("index1").get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + CreatePitResponse pitResponse1 = client().execute(CreatePitAction.INSTANCE, request).get(); + CreatePitResponse pitResponse2 = client().execute(CreatePitAction.INSTANCE, request).get(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse 
getPitResponse = execute1.get(); + assertEquals(3, getPitResponse.getPitInfos().size()); + List resultPitIds = getPitResponse.getPitInfos().stream().map(p -> p.getPitId()).collect(Collectors.toList()); + // asserting that we get all unique PIT IDs + Assert.assertTrue(resultPitIds.contains(pitResponse.getId())); + Assert.assertTrue(resultPitIds.contains(pitResponse1.getId())); + Assert.assertTrue(resultPitIds.contains(pitResponse2.getId())); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testGetAllPitsDuringNodeDrop() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(getDiscoveryNodes()); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute1 = client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + // we still get a pit id from the data node which is up + assertEquals(1, getPitResponse.getPitInfos().size()); + // failure for node drop + assertEquals(1, getPitResponse.failures().size()); + assertTrue(getPitResponse.failures().get(0).getMessage().contains("Failed node")); + return super.onNodeStopped(nodeName); + } + }); + } + + private DiscoveryNode[] getDiscoveryNodes() throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + return disNodesArr; + } + + public void testConcurrentGetWithDeletes() throws InterruptedException, ExecutionException { + CreatePitRequest createPitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + List pitIds = new ArrayList<>(); + String id = client().execute(CreatePitAction.INSTANCE, createPitRequest).get().getId(); + pitIds.add(id); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(getDiscoveryNodes()); + AtomicInteger numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(PitMultiNodeTests.class.getName()); + int concurrentRuns = randomIntBetween(20, 50); + + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + long randomDeleteThread = randomLongBetween(0, concurrentRuns - 1); + for (int i = 0; i < concurrentRuns; i++) { + int currentThreadIteration = i; + Runnable thread = () -> { + if (currentThreadIteration == randomDeleteThread) { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void 
onResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + if (getAllPitNodesResponse.failures().isEmpty()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest, listener); + } else { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index ecc28470b0eb2..9c140cda6ccc8 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1414,6 +1414,7 @@ public void testOpenReaderContext() { searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); future.actionGet(); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); } @@ -1442,6 +1443,7 @@ public void testDeletePitReaderContext() { contextIds.add(pitSearchContextIdForNode); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); DeletePitResponse deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); // assert true for reader context not found @@ -1451,19 +1453,6 @@ public void testDeletePitReaderContext() { assertFalse(searchService.freeReaderContext(future.actionGet())); } - public void testDeleteAllPitReaderContexts() { - createIndex("index"); - SearchService searchService = getInstanceFromNode(SearchService.class); - PlainActionFuture future = new PlainActionFuture<>(); - searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - future.actionGet(); - searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - future.actionGet(); - assertThat(searchService.getActiveContexts(), equalTo(2)); - searchService.freeAllPitContexts(); - assertThat(searchService.getActiveContexts(), equalTo(0)); - } - public void testPitContextMaxKeepAlive() { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); @@ -1484,6 +1473,7 @@ public void testPitContextMaxKeepAlive() { ex.getMessage() ); assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); } public void testUpdatePitId() { @@ -1506,6 +1496,7 @@ public void 
testUpdatePitId() { assertTrue(updateResponse.getKeepAlive() == updateRequest.getKeepAlive()); assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); } @@ -1538,6 +1529,7 @@ public void testUpdatePitIdMaxKeepAlive() { ex.getMessage() ); assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); } @@ -1558,5 +1550,6 @@ public void testUpdatePitIdWithInvalidReaderId() { assertEquals("No search context found for id [" + id.getId() + "]", ex.getMessage()); assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); } } From 6f23300bf604076999095d2802b9913af0faf98b Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 3 Aug 2022 13:37:59 +0530 Subject: [PATCH 059/104] [Remote Store] Add support to add nested settings for remote store (#4060) * Add support to add nested settings for remote store Signed-off-by: Sachin Kale --- .../java/org/opensearch/cluster/metadata/IndexMetadata.java | 6 +++--- .../org/opensearch/common/settings/IndexScopedSettings.java | 2 +- .../src/main/java/org/opensearch/index/IndexSettings.java | 2 +- .../main/java/org/opensearch/snapshots/RestoreService.java | 4 ++-- .../test/java/org/opensearch/index/IndexSettingsTests.java | 6 +++--- .../java/org/opensearch/index/shard/IndexShardTests.java | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 442137fb70e1f..c534eeccd4e2b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -283,12 +283,12 @@ public Iterator> settings() { Property.Final ); - public static final String SETTING_REMOTE_STORE = "index.remote_store"; + public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled"; /** * Used to specify if the index data should be persisted in the remote store. 
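 * A minimal usage sketch (illustrative, not part of this patch): an index created with
 * {@code Settings.builder().put(SETTING_REMOTE_STORE_ENABLED, true)} opts into remote-store
 * persistence, and because the setting is registered with {@code Property.Final} it cannot be
 * changed once the index exists.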
*/ - public static final Setting INDEX_REMOTE_STORE_SETTING = Setting.boolSetting( - SETTING_REMOTE_STORE, + public static final Setting INDEX_REMOTE_STORE_ENABLED_SETTING = Setting.boolSetting( + SETTING_REMOTE_STORE_ENABLED, false, Property.IndexScope, Property.Final diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a2aac1f2c54c5..b0efaab733231 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -222,7 +222,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FeatureFlags.REPLICATION_TYPE, IndexMetadata.INDEX_REPLICATION_TYPE_SETTING, FeatureFlags.REMOTE_STORE, - IndexMetadata.INDEX_REMOTE_STORE_SETTING + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7c9b9755a2434..d1363540709c8 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -718,7 +718,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE, false); + isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index a14de23d81d09..417498467622a 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -116,7 +116,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.snapshots.SnapshotUtils.filterIndices; @@ -227,7 +227,7 @@ public ClusterState execute(ClusterState currentState) { logger.warn("Remote store restore is not supported for non-existent index. 
Skipping: {}", index); continue; } - if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE, false)) { + if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) { throw new IllegalStateException( "cannot restore index [" diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 4b3dc041b9f54..b15b959abe6c5 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -770,7 +770,7 @@ public void testRemoteStoreExplicitSetting() { "index", Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_REMOTE_STORE, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build() ); IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); @@ -784,12 +784,12 @@ public void testUpdateRemoteStoreFails() { IllegalArgumentException error = expectThrows( IllegalArgumentException.class, () -> settings.updateSettings( - Settings.builder().put("index.remote_store", randomBoolean()).build(), + Settings.builder().put("index.remote_store.enabled", randomBoolean()).build(), Settings.builder(), Settings.builder(), "index" ) ); - assertEquals(error.getMessage(), "final index setting [index.remote_store], not updateable"); + assertEquals(error.getMessage(), "final index setting [index.remote_store.enabled], not updateable"); } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index b65528ae207fd..57c2289c848ef 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -2660,7 +2660,7 @@ public void restoreShard( public void testRestoreShardFromRemoteStore() throws IOException { IndexShard target = newStartedShard( true, - Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE, true).build(), + Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true).build(), new InternalEngineFactory() );
From 27c54934cde5c90ad2e4497c96fc8dc8d7fb251d Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Wed, 3 Aug 2022 14:48:54 -0400 Subject: [PATCH 060/104] Correctly ignore dependabot branches during push (#4077) Signed-off-by: Peter Zhu --- .github/workflows/gradle-check.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index dec5ee15d0bea..cbaa7fa10fbb6 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -2,9 +2,9 @@ name: Gradle Check (Jenkins) on: push: branches-ignore: - 'backport/*' - 'create-pull-request/*' - 'dependabot/*' + 'backport/**' + 'create-pull-request/**' + 'dependabot/**' pull_request_target: types: [opened, synchronize, reopened]
From 3ab0d1f398e0b22b90237a6dd8db97e6376530dc Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 3 Aug 2022 12:16:51 -0700 Subject: [PATCH 061/104] Fix the bug that masterOperation(with task param) is bypassed (#4103) The method `protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener)` was not properly deprecated by the commit https://github.com/opensearch-project/OpenSearch/commit/2a1b239c2af8d7f333a404c66467a50dc0a52c80 / PR https://github.com/opensearch-project/OpenSearch/pull/403. The problem is that overriding the method `void masterOperation(Task task, Request request, ClusterState state, ActionListener listener)` makes no difference: its only caller was changed to call `clusterManagerOperation(task, request, clusterState, l)`, which is the default implementation of `masterOperation(with task param)`. Since there is no other usage of `masterOperation()`, the override is bypassed. In the commit: - Change the delegation model for method `clusterManagerOperation(with task param)` to call `masterOperation(with task param)`, according to the comment below https://github.com/opensearch-project/OpenSearch/pull/4103#discussion_r936946699 - Add a test to validate the backwards compatibility, copied and modified from an existing test. Signed-off-by: Tianli Feng --- .../TransportClusterManagerNodeAction.java | 11 ++++-- .../info/TransportClusterInfoAction.java | 1 + ...ransportClusterManagerNodeActionTests.java | 37 +++++++++++++++++++ .../java/org/opensearch/test/TestCluster.java | 1 + 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index d65ba3eddf776..a97f4ffe555b6 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -125,22 +125,27 @@ protected void masterOperation(Request request, ClusterState state, ActionListen throw new UnsupportedOperationException("Must be overridden"); } + // TODO: Add abstract keyword after removing the deprecated masterOperation() protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { masterOperation(request, state, listener); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(Task, ClusterManagerNodeRequest, ClusterState, ActionListener)} */ + /** + * Override this operation if access to the task parameter is needed + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(Task, ClusterManagerNodeRequest, ClusterState, ActionListener)} + */ @Deprecated protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { - clusterManagerOperation(task, request, state, listener); + clusterManagerOperation(request, state, listener); } /** * Override this operation if access to the task parameter is needed */ + // TODO: Change the implementation to call 'clusterManagerOperation(request...)' after removing the deprecated masterOperation() protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { - clusterManagerOperation(request, state, listener); + masterOperation(task, request, state, listener); } protected boolean localExecute(Request request) { diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java 
b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index 1411ff7b30695..c43256a61e8b4 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -88,6 +88,7 @@ protected final void clusterManagerOperation(final Request request, final Cluste doClusterManagerOperation(request, concreteIndices, state, listener); } + // TODO: Add abstract keyword after removing the deprecated doMasterOperation() protected void doClusterManagerOperation( Request request, String[] concreteIndices, diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 9ec9e656257d6..1195ed2590b1e 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -274,6 +274,43 @@ protected void clusterManagerOperation(Task task, Request request, ClusterState } } + /* The test is copied from testLocalOperationWithoutBlocks() + to validate the backwards compatibility for the deprecated method masterOperation(with task parameter). */ + public void testDeprecatedMasterOperationWithTaskParameterCanBeCalled() throws ExecutionException, InterruptedException { + final boolean clusterManagerOperationFailure = randomBoolean(); + + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + + final Exception exception = new Exception(); + final Response response = new Response(); + + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); + + new Action("internal:testAction", transportService, clusterService, threadPool) { + @Override + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + if (clusterManagerOperationFailure) { + listener.onFailure(exception); + } else { + listener.onResponse(response); + } + } + }.execute(request, listener); + assertTrue(listener.isDone()); + + if (clusterManagerOperationFailure) { + try { + listener.get(); + fail("Expected exception but returned proper result"); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), equalTo(exception)); + } + } else { + assertThat(listener.get(), equalTo(response)); + } + } + public void testLocalOperationWithBlocks() throws ExecutionException, InterruptedException { final boolean retryableBlock = randomBoolean(); final boolean unblockBeforeTimeout = randomBoolean(); diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index c2e90f0369e6c..478b692fb06ef 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -127,6 +127,7 @@ public void assertAfterTest() throws Exception { /** * Returns the number of data and cluster-manager eligible nodes in the cluster. 
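 * (The default implementation below delegates to the deprecated {@code numDataAndMasterNodes()}
 * so existing subclasses keep working until the override is made abstract, as the TODO notes.)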
*/ + // TODO: Add abstract keyword after removing the deprecated numDataAndMasterNodes() public int numDataAndClusterManagerNodes() { return numDataAndMasterNodes(); } From 7af8e37f81c0a124712a8cf86dab973df26f906f Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 3 Aug 2022 15:02:39 -0700 Subject: [PATCH 062/104] Fix isAheadOf logic for ReplicationCheckpoint comparison (#4112) Signed-off-by: Suraj Singh --- .../replication/checkpoint/ReplicationCheckpoint.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index abcef1bd91944..8afb5bd055636 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -125,10 +125,13 @@ public int hashCode() { } /** - * Checks if other is aheadof current replication point by comparing segmentInfosVersion. Returns true for null + * Checks if current replication checkpoint is AheadOf `other` replication checkpoint point by first comparing + * primaryTerm followed by segmentInfosVersion. Returns true when `other` is null. */ public boolean isAheadOf(@Nullable ReplicationCheckpoint other) { - return other == null || segmentInfosVersion > other.getSegmentInfosVersion() || primaryTerm > other.getPrimaryTerm(); + return other == null + || primaryTerm > other.getPrimaryTerm() + || (primaryTerm == other.getPrimaryTerm() && segmentInfosVersion > other.getSegmentInfosVersion()); } @Override From ba9cdcdb262176a4947f13471b27450d8ac6fe63 Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Wed, 3 Aug 2022 18:19:28 -0700 Subject: [PATCH 063/104] Deprecate and rename abstract methods in interfaces that contain 'master' in name (#4121) Signed-off-by: Xue Zhou --- .../LocalNodeClusterManagerListener.java | 8 ++--- .../cluster/LocalNodeMasterListener.java | 29 +++++++++++++++++++ .../opensearch/cluster/ack/AckedRequest.java | 14 ++++++++- .../service/ClusterApplierServiceTests.java | 4 +-- 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index c86aa00a6f2a2..c07dcc5daaee6 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -42,21 +42,21 @@ public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** * Called when local node is elected to be the cluster-manager */ - void onMaster(); + void onClusterManager(); /** * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. 
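 * (Dispatch happens in the default {@code clusterChanged} implementation below, which compares
 * whether the local node was elected in the previous and current cluster states and fires
 * {@code onClusterManager()} or {@code offClusterManager()} only on a transition.)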
*/ - void offMaster(); + void offClusterManager(); @Override default void clusterChanged(ClusterChangedEvent event) { final boolean wasClusterManager = event.previousState().nodes().isLocalNodeElectedClusterManager(); final boolean isClusterManager = event.localNodeClusterManager(); if (wasClusterManager == false && isClusterManager) { - onMaster(); + onClusterManager(); } else if (wasClusterManager && isClusterManager == false) { - offMaster(); + offClusterManager(); } } } diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index eebfb60d8472d..31c0b294b8004 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -41,4 +41,33 @@ @Deprecated public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { + /** + * Called when local node is elected to be the cluster-manager. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #onClusterManager()} + */ + @Deprecated + void onMaster(); + + /** + * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #offClusterManager()} + */ + @Deprecated + void offMaster(); + + /** + * Called when local node is elected to be the cluster-manager. + */ + @Override + default void onClusterManager() { + onMaster(); + } + + /** + * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. + */ + @Override + default void offClusterManager() { + offMaster(); + } } diff --git a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java index a3f74cb45a880..750f4b177cb86 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java +++ b/server/src/main/java/org/opensearch/cluster/ack/AckedRequest.java @@ -48,6 +48,18 @@ public interface AckedRequest { /** * Returns the timeout for the request to be completed on the cluster-manager node + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout()} */ - TimeValue masterNodeTimeout(); + @Deprecated + default TimeValue masterNodeTimeout() { + throw new UnsupportedOperationException("Must be overridden"); + } + + /** + * Returns the timeout for the request to be completed on the cluster-manager node + */ + // TODO: Remove default implementation after removing the deprecated masterNodeTimeout() + default TimeValue clusterManagerNodeTimeout() { + return masterNodeTimeout(); + } } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index 62dae0622eb85..e6da768650088 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -299,12 +299,12 @@ public void testLocalNodeClusterManagerListenerCallbacks() { AtomicBoolean isClusterManager = new AtomicBoolean(); timedClusterApplierService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() { @Override - public void onMaster() { + public void onClusterManager() { 
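+ // records that this node was elected, so the test can observe the renamed
+ // onClusterManager()/offClusterManager() callbacks firing via the default clusterChanged() dispatch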
isClusterManager.set(true); } @Override - public void offMaster() { + public void offClusterManager() { isClusterManager.set(false); } }); From d09c2857e50f9f6631c4da27ae046f28581d9398 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 4 Aug 2022 09:23:44 -0700 Subject: [PATCH 064/104] Disable shard idle with segment replication. (#4118) This change disables shard idle when segment replication is enabled. Primary shards will only push out new segments on refresh, we do not want to block this based on search behavior. Replicas will only refresh on an externally provided SegmentInfos, so we do not want search requests to hang waiting for a refresh. Signed-off-by: Marc Handalian --- .../index/engine/NRTReplicationEngine.java | 12 ++ .../opensearch/index/shard/IndexShard.java | 4 + .../SegmentReplicationIndexShardTests.java | 59 ++++++++ ...enSearchIndexLevelReplicationTestCase.java | 10 ++ .../index/shard/IndexShardTestCase.java | 130 ++++++++++++++++++ 5 files changed, 215 insertions(+) create mode 100644 server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index af175be286b13..c94313d2b8cee 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -193,6 +193,18 @@ protected ReferenceManager getReferenceManager(Search return readerManager; } + /** + * Refreshing of this engine will only happen internally when a new set of segments is received. The engine will ignore external + * refresh attempts so we can return false here. Further Engine's existing implementation reads DirectoryReader.isCurrent after acquiring a searcher. + * With this Engine's NRTReplicationReaderManager, This will use StandardDirectoryReader's implementation which determines if the reader is current by + * comparing the on-disk SegmentInfos version against the one in the reader, which at refresh points will always return isCurrent false and then refreshNeeded true. + * Even if this method returns refresh as needed, we ignore it and only ever refresh with incoming SegmentInfos. + */ + @Override + public boolean refreshNeeded() { + return false; + } + @Override public Closeable acquireHistoryRetentionLock() { throw new UnsupportedOperationException("Not implemented"); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9694f3dd37f80..b05eec4304cd5 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3778,6 +3778,10 @@ public boolean scheduledRefresh() { if (listenerNeedsRefresh == false // if we have a listener that is waiting for a refresh we need to force it && isSearchIdle() && indexSettings.isExplicitRefresh() == false + && indexSettings.isSegRepEnabled() == false + // Indices with segrep enabled will never wait on a refresh and ignore shard idle. Primary shards push out new segments only + // after a refresh, so we don't want to wait for a search to trigger that cycle. Replicas will only refresh after receiving + // a new set of segments. 
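+ // Net effect: when isSegRepEnabled() is true the condition above can never hold, so the
+ // search-idle skip below is bypassed and scheduledRefresh() proceeds to its normal refresh path.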
&& active.get()) { // it must be active otherwise we might not free up segment memory once the shard became inactive // lets skip this refresh since we are search idle and // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java new file mode 100644 index 0000000000000..3fcf6116b11a2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.indices.replication.common.ReplicationType; + +public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { + + private static final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + + public void testIgnoreShardIdle() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + primary.scheduledRefresh(); + replica.scheduledRefresh(); + + primary.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + replica.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + + // Update the search_idle setting, this will put both shards into search idle. + Settings updatedSettings = Settings.builder() + .put(settings) + .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO) + .build(); + primary.indexSettings().getScopedSettings().applySettings(updatedSettings); + replica.indexSettings().getScopedSettings().applySettings(updatedSettings); + + primary.scheduledRefresh(); + replica.scheduledRefresh(); + + // Shards without segrep will register a new RefreshListener on the engine and return true when registered, + // assert with segrep enabled that awaitShardSearchActive does not register a listener. 
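+ // (This relies on the IndexShard.scheduledRefresh() change earlier in this patch: segrep-enabled
+ // shards never take the search-idle branch from which such a refresh listener would be registered.)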
+ primary.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + replica.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 5e22a7c145a39..a588909085160 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -139,6 +139,16 @@ protected ReplicationGroup createGroup(int replicas, Settings settings) throws I return new ReplicationGroup(metadata); } + protected ReplicationGroup createGroup(int replicas, Settings settings, EngineFactory engineFactory) throws IOException { + IndexMetadata metadata = buildIndexMetadata(replicas, settings, indexMapping); + return new ReplicationGroup(metadata) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return engineFactory; + } + }; + } + protected IndexMetadata buildIndexMetadata(int replicas) throws IOException { return buildIndexMetadata(replicas, indexMapping); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index c5ee54450cce2..7dedc572ff19b 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -33,9 +33,15 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.junit.Assert; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; +import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; @@ -58,6 +64,7 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -82,6 +89,7 @@ import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; @@ -94,7 +102,14 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import 
org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; @@ -112,6 +127,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -122,6 +138,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; /** @@ -1133,4 +1150,117 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { } }; } + + /** + * Segment Replication specific test method - Replicate segments to a list of replicas from a given primary. + * This test will use a real {@link SegmentReplicationTarget} for each replica with a mock {@link SegmentReplicationSource} that + * writes all segments directly to the target. + */ + public final void replicateSegments(IndexShard primaryShard, List replicaShards) throws IOException, InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + Store.MetadataSnapshot primaryMetadata; + try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getMetadata(primarySegmentInfos); + } + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + + final ReplicationCollection replicationCollection = new ReplicationCollection<>(logger, threadPool); + final SegmentReplicationSource source = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + try ( + final ReplicationCollection.ReplicationRef replicationRef = replicationCollection.get( + replicationId + ) + ) { + writeFileChunks(replicationRef.get(), primaryShard, filesToFetch.toArray(new StoreFileMetadata[] {})); + } catch (IOException e) { + listener.onFailure(e); + } + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); + } + }; + + for (IndexShard replica : replicaShards) { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + ReplicationCheckpoint.empty(replica.shardId), + replica, + source, + new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + try (final GatedCloseable snapshot = 
replica.getSegmentInfosSnapshot()) { + final SegmentInfos replicaInfos = snapshot.get(); + final Store.MetadataSnapshot replicaMetadata = replica.store().getMetadata(replicaInfos); + final Store.RecoveryDiff recoveryDiff = primaryMetadata.recoveryDiff(replicaMetadata); + assertTrue(recoveryDiff.missing.isEmpty()); + assertTrue(recoveryDiff.different.isEmpty()); + assertEquals(recoveryDiff.identical.size(), primaryMetadata.size()); + assertEquals(primaryMetadata.getCommitUserData(), replicaMetadata.getCommitUserData()); + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + countDownLatch.countDown(); + } + + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.error("Unexpected replication failure in test", e); + Assert.fail("test replication should not fail: " + e); + } + } + ); + replicationCollection.start(target, TimeValue.timeValueMillis(5000)); + target.startReplication(new ActionListener<>() { + @Override + public void onResponse(Void o) { + replicationCollection.markAsDone(target.getId()); + } + + @Override + public void onFailure(Exception e) { + replicationCollection.fail(target.getId(), new OpenSearchException("Segment Replication failed", e), true); + } + }); + } + countDownLatch.await(3, TimeUnit.SECONDS); + } + + private void writeFileChunks(SegmentReplicationTarget target, IndexShard primary, StoreFileMetadata[] files) throws IOException { + for (StoreFileMetadata md : files) { + try (IndexInput in = primary.store().directory().openInput(md.name(), IOContext.READONCE)) { + int pos = 0; + while (pos < md.length()) { + int length = between(1, Math.toIntExact(md.length() - pos)); + byte[] buffer = new byte[length]; + in.readBytes(buffer, 0, length); + target.writeFileChunk(md, pos, new BytesArray(buffer), pos + length == md.length(), 0, mock(ActionListener.class)); + pos += length; + } + } + } + } } From 797a5d98722a306bcb0e2e9f47fa054db67098d6 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 4 Aug 2022 17:44:57 -0700 Subject: [PATCH 065/104] Fix waitUntil refresh policy for segrep enabled indices. (#4137) Signed-off-by: Marc Handalian --- .../java/org/opensearch/index/WaitUntilRefreshIT.java | 9 ++++++++- .../opensearch/index/engine/NRTReplicationEngine.java | 6 ++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java b/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java index e38b128c04fde..a75057356fe8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java @@ -42,7 +42,10 @@ import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.rest.RestStatus; import org.opensearch.script.MockScriptPlugin; @@ -74,7 +77,11 @@ public class WaitUntilRefreshIT extends OpenSearchIntegTestCase { @Override public Settings indexSettings() { // Use a shorter refresh interval to speed up the tests. We'll be waiting on this interval several times. 
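For orientation, a minimal sketch of the round trip these tests exercise (index name, id, and field are placeholders; imports elided):

    // WAIT_UNTIL blocks the indexing response until a refresh makes the document searchable;
    // on a segrep index that refresh fires via the NRT reader wired up in the engine change below.
    IndexResponse response = client().prepareIndex("test")
        .setId("1")
        .setSource("foo", "bar")
        .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL)
        .get();
    assertEquals(RestStatus.CREATED, response.status());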
- return Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "40ms").build(); + final Settings.Builder builder = Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "40ms"); + if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { + builder.put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } + return builder.build(); } @Before diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index c94313d2b8cee..1f9306cf51e1e 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -69,6 +69,12 @@ public NRTReplicationEngine(EngineConfig engineConfig) { this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); this.readerManager = readerManager; this.readerManager.addListener(completionStatsCache); + for (ReferenceManager.RefreshListener listener : engineConfig.getExternalRefreshListener()) { + this.readerManager.addListener(listener); + } + for (ReferenceManager.RefreshListener listener : engineConfig.getInternalRefreshListener()) { + this.readerManager.addListener(listener); + } final Map userData = store.readLastCommittedSegmentsInfo().getUserData(); final String translogUUID = Objects.requireNonNull(userData.get(Translog.TRANSLOG_UUID_KEY)); translogManagerRef = new WriteOnlyTranslogManager( From 42058aa76a18f531ceaac7deb4e94944cf02f463 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 5 Aug 2022 10:29:22 +0530 Subject: [PATCH 066/104] [Remote Store] Add rest endpoint for remote store restore (#3576) * Add rest endpoint for remote store restore Signed-off-by: Sachin Kale --- .../client/RestHighLevelClientTests.java | 3 +- .../api/remote_store.restore.json | 34 +++++ .../org/opensearch/action/ActionModule.java | 12 ++ .../restore/RestoreRemoteStoreAction.java | 26 ++++ .../restore/RestoreRemoteStoreResponse.java | 126 ++++++++++++++++++ .../TransportRestoreRemoteStoreAction.java | 103 ++++++++++++++ .../remotestore/restore/package-info.java | 2 +- .../restore/RestoreClusterStateListener.java | 33 +++-- .../TransportRestoreSnapshotAction.java | 7 +- .../opensearch/client/ClusterAdminClient.java | 7 + .../client/support/AbstractClient.java | 8 ++ .../cluster/RestRestoreRemoteStoreAction.java | 50 +++++++ .../RestoreRemoteStoreResponseTests.java | 45 +++++++ 13 files changed, 441 insertions(+), 15 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index efcc13921c398..3da0f81023f72 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -885,7 +885,8 @@ public void testApiNamingConventions() throws Exception { "nodes.hot_threads", "nodes.usage", "nodes.reload_secure_settings", - "search_shards", }; + "search_shards", + "remote_store.restore", }; List booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); Set deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json new file mode 100644 index 0000000000000..6af49f75b9f6e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json @@ -0,0 +1,34 @@ +{ + "remote_store.restore":{ + "documentation":{ + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/remote-store#restore", + "description":"Restores from remote store." + }, + "stability":"experimental", + "url":{ + "paths":[ + { + "path":"/_remotestore/_restore", + "methods":[ + "POST" + ] + } + ] + }, + "params":{ + "cluster_manager_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to cluster-manager node" + }, + "wait_for_completion":{ + "type":"boolean", + "description":"Should this request wait until the operation has completed before returning", + "default":false + } + }, + "body":{ + "description":"A comma separated list of index IDs", + "required":true + } + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 2f470d7603869..052d2ec2b5764 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -61,6 +61,8 @@ import org.opensearch.action.admin.cluster.node.usage.TransportNodesUsageAction; import org.opensearch.action.admin.cluster.remote.RemoteInfoAction; import org.opensearch.action.admin.cluster.remote.TransportRemoteInfoAction; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; +import org.opensearch.action.admin.cluster.remotestore.restore.TransportRestoreRemoteStoreAction; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -267,6 +269,7 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.seqno.RetentionLeaseActions; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.breaker.CircuitBreakerService; @@ -314,6 +317,7 @@ import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; import org.opensearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; +import org.opensearch.rest.action.admin.cluster.RestRestoreRemoteStoreAction; import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import 
org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; @@ -668,6 +672,9 @@ public void reg actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); + // Remote Store + actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -853,6 +860,11 @@ public void initRestHandlers(Supplier nodesInCluster) { } } registerHandler.accept(new RestCatAction(catActions)); + + // Remote Store APIs + if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { + registerHandler.accept(new RestRestoreRemoteStoreAction()); + } } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java new file mode 100644 index 0000000000000..46b1bc14e8537 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreAction.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.action.ActionType; + +/** + * Restore remote store action + * + * @opensearch.internal + */ +public final class RestoreRemoteStoreAction extends ActionType { + + public static final RestoreRemoteStoreAction INSTANCE = new RestoreRemoteStoreAction(); + public static final String NAME = "cluster:admin/remotestore/restore"; + + private RestoreRemoteStoreAction() { + super(NAME, RestoreRemoteStoreResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java new file mode 100644 index 0000000000000..80bf96b6b2562 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.Nullable; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; +import org.opensearch.snapshots.RestoreInfo; + +import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Contains information about remote store restores + * + * @opensearch.internal + */ +public final class RestoreRemoteStoreResponse extends ActionResponse implements ToXContentObject { + + @Nullable + private final RestoreInfo restoreInfo; + + public RestoreRemoteStoreResponse(@Nullable RestoreInfo restoreInfo) { + this.restoreInfo = restoreInfo; + } + + public RestoreRemoteStoreResponse(StreamInput in) throws IOException { + super(in); + restoreInfo = RestoreInfo.readOptionalRestoreInfo(in); + } + + /** + * Returns restore information if remote store restore was completed before this method returned, null otherwise + * + * @return restore information or null + */ + public RestoreInfo getRestoreInfo() { + return restoreInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(restoreInfo); + } + + public RestStatus status() { + if (restoreInfo == null) { + return RestStatus.ACCEPTED; + } + return restoreInfo.status(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (restoreInfo != null) { + builder.field("remote_store"); + restoreInfo.toXContent(builder, params); + } else { + builder.field("accepted", true); + } + builder.endObject(); + return builder; + } + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "restore_remote_store", + true, + v -> { + RestoreInfo restoreInfo = (RestoreInfo) v[0]; + Boolean accepted = (Boolean) v[1]; + assert (accepted == null && restoreInfo != null) || (accepted != null && accepted && restoreInfo == null) : "accepted: [" + + accepted + + "], restoreInfo: [" + + restoreInfo + + "]"; + return new RestoreRemoteStoreResponse(restoreInfo); + } + ); + + static { + PARSER.declareObject( + optionalConstructorArg(), + (parser, context) -> RestoreInfo.fromXContent(parser), + new ParseField("remote_store") + ); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("accepted")); + } + + public static RestoreRemoteStoreResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RestoreRemoteStoreResponse that = (RestoreRemoteStoreResponse) o; + return Objects.equals(restoreInfo, that.restoreInfo); + } + + @Override + public int hashCode() { + return Objects.hash(restoreInfo); + } + + @Override + public String toString() { + return "RestoreRemoteStoreResponse{" + "restoreInfo=" + restoreInfo + '}'; + } +} diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java new file mode 100644 index 0000000000000..7304ba25717ac --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.snapshots.RestoreService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for restore remote store operation + * + * @opensearch.internal + */ +public final class TransportRestoreRemoteStoreAction extends TransportClusterManagerNodeAction< + RestoreRemoteStoreRequest, + RestoreRemoteStoreResponse> { + private final RestoreService restoreService; + + @Inject + public TransportRestoreRemoteStoreAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + RestoreService restoreService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + RestoreRemoteStoreAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + RestoreRemoteStoreRequest::new, + indexNameExpressionResolver + ); + this.restoreService = restoreService; + } + + @Override + protected String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + protected RestoreRemoteStoreResponse read(StreamInput in) throws IOException { + return new RestoreRemoteStoreResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(RestoreRemoteStoreRequest request, ClusterState state) { + // Restoring a remote store might change the global state and create/change an index, + // so we need to check for METADATA_WRITE and WRITE blocks + ClusterBlockException blockException = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + if (blockException != null) { + return blockException; + } + return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); + + } + + @Override + protected void clusterManagerOperation( + final RestoreRemoteStoreRequest request, + final ClusterState state, + final ActionListener listener + ) { + restoreService.restoreFromRemoteStore( + request, + ActionListener.delegateFailure(listener, (delegatedListener, restoreCompletionResponse) -> { + if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { + 
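+                    // The restore is still in flight: hand off to a cluster-state listener that
+                    // resolves the response once the RestoreInProgress entry clears. The factory
+                    // argument below lets snapshot and remote-store restores share one listener.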
RestoreClusterStateListener.createAndRegisterListener( + clusterService, + restoreCompletionResponse, + delegatedListener, + RestoreRemoteStoreResponse::new + ); + } else { + delegatedListener.onResponse(new RestoreRemoteStoreResponse(restoreCompletionResponse.getRestoreInfo())); + } + }) + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java index 363b7179f3c6c..10348f5ccfe6e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Restore Snapshot transport handler. */ +/** Restore remote store transport handler. */ package org.opensearch.action.admin.cluster.remotestore.restore; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index 7d2ca99e3dbf5..d0f78e85e26a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.RestoreInProgress; @@ -44,6 +45,8 @@ import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.RestoreService; +import java.util.function.Function; + import static org.opensearch.snapshots.RestoreService.restoreInProgress; /** @@ -51,22 +54,27 @@ * * @opensearch.internal */ -public class RestoreClusterStateListener implements ClusterStateListener { +public class RestoreClusterStateListener implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(RestoreClusterStateListener.class); private final ClusterService clusterService; private final String uuid; - private final ActionListener listener; + private final String restoreIdentifier; + private final ActionListener listener; + private final Function actionResponseFactory; private RestoreClusterStateListener( ClusterService clusterService, RestoreService.RestoreCompletionResponse response, - ActionListener listener + ActionListener listener, + Function actionResponseFactory ) { this.clusterService = clusterService; this.uuid = response.getUuid(); + this.restoreIdentifier = response.getSnapshot() != null ? response.getSnapshot().getSnapshotId().getName() : "remote_store"; this.listener = listener; + this.actionResponseFactory = actionResponseFactory; } @Override @@ -78,23 +86,23 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { // on the current cluster-manager and as such it might miss some intermediary cluster states due to batching. // Clean up listener in that case and acknowledge completion of restore operation to client. 
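            // (With this change the acknowledgement below goes through actionResponseFactory,
            //  so the same code path now serves both snapshot and remote-store restores.)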
clusterService.removeListener(this); - listener.onResponse(new RestoreSnapshotResponse((RestoreInfo) null)); + listener.onResponse(actionResponseFactory.apply(null)); } else if (newEntry == null) { clusterService.removeListener(this); ImmutableOpenMap shards = prevEntry.shards(); - assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state(); + assert prevEntry.state().completed() : "expected completed snapshot/remote store restore state but was " + prevEntry.state(); assert RestoreService.completed(shards) : "expected all restore entries to be completed"; RestoreInfo ri = new RestoreInfo( - prevEntry.snapshot().getSnapshotId().getName(), + restoreIdentifier, prevEntry.indices(), shards.size(), shards.size() - RestoreService.failedShards(shards) ); - RestoreSnapshotResponse response = new RestoreSnapshotResponse(ri); - logger.debug("restore of [{}] completed", prevEntry.snapshot().getSnapshotId()); + T response = actionResponseFactory.apply(ri); + logger.debug("restore of [{}] completed", restoreIdentifier); listener.onResponse(response); } else { - // restore not completed yet, wait for next cluster state update + logger.debug("restore not completed yet, wait for next cluster state update"); } } @@ -102,11 +110,12 @@ public void clusterChanged(ClusterChangedEvent changedEvent) { * Creates a cluster state listener and registers it with the cluster service. The listener passed as a * parameter will be called when the restore is complete. */ - public static void createAndRegisterListener( + public static void createAndRegisterListener( ClusterService clusterService, RestoreService.RestoreCompletionResponse response, - ActionListener listener + ActionListener listener, + Function actionResponseFactory ) { - clusterService.addListener(new RestoreClusterStateListener(clusterService, response, listener)); + clusterService.addListener(new RestoreClusterStateListener(clusterService, response, listener, actionResponseFactory)); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index e7d95b9e40880..c2f79b2a27157 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -109,7 +109,12 @@ protected void clusterManagerOperation( ) { restoreService.restoreSnapshot(request, ActionListener.delegateFailure(listener, (delegatedListener, restoreCompletionResponse) -> { if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { - RestoreClusterStateListener.createAndRegisterListener(clusterService, restoreCompletionResponse, delegatedListener); + RestoreClusterStateListener.createAndRegisterListener( + clusterService, + restoreCompletionResponse, + delegatedListener, + RestoreSnapshotResponse::new + ); } else { delegatedListener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo())); } diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index f4eaa979ff18c..7a7b98bf724f6 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -62,6 +62,8 @@ 
import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; @@ -577,6 +579,11 @@ public interface ClusterAdminClient extends OpenSearchClient { */ void restoreSnapshot(RestoreSnapshotRequest request, ActionListener listener); + /** + * Restores from remote store. + */ + void restoreRemoteStore(RestoreRemoteStoreRequest request, ActionListener listener); + /** * Restores a snapshot. */ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index f99454a8a8913..7084a856ab3d1 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -77,6 +77,9 @@ import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.opensearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; @@ -1109,6 +1112,11 @@ public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener listener) { + execute(RestoreRemoteStoreAction.INSTANCE, request, listener); + } + @Override public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot) { return new RestoreSnapshotRequestBuilder(this, RestoreSnapshotAction.INSTANCE, repository, snapshot); diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java new file mode 100644 index 0000000000000..fca6745167bb4 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Restores data from remote store + * + * @opensearch.api + */ +public final class RestRestoreRemoteStoreAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(POST, "/_remotestore/_restore")); + } + + @Override + public String getName() { + return "restore_remote_store_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + RestoreRemoteStoreRequest restoreRemoteStoreRequest = new RestoreRemoteStoreRequest(); + restoreRemoteStoreRequest.masterNodeTimeout( + request.paramAsTime("cluster_manager_timeout", restoreRemoteStoreRequest.masterNodeTimeout()) + ); + restoreRemoteStoreRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); + request.applyContentParser(p -> restoreRemoteStoreRequest.source(p.mapOrdered())); + return channel -> client.admin().cluster().restoreRemoteStore(restoreRemoteStoreRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java new file mode 100644 index 0000000000000..b52729c1bbfd7 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponseTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.remotestore.restore; + +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.snapshots.RestoreInfo; +import org.opensearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class RestoreRemoteStoreResponseTests extends AbstractXContentTestCase { + + @Override + protected RestoreRemoteStoreResponse createTestInstance() { + if (randomBoolean()) { + String name = "remote_store"; + List indices = new ArrayList<>(); + indices.add("test0"); + indices.add("test1"); + int totalShards = randomIntBetween(1, 1000); + int successfulShards = randomIntBetween(0, totalShards); + return new RestoreRemoteStoreResponse(new RestoreInfo(name, indices, totalShards, successfulShards)); + } else { + return new RestoreRemoteStoreResponse((RestoreInfo) null); + } + } + + @Override + protected RestoreRemoteStoreResponse doParseInstance(XContentParser parser) throws IOException { + return RestoreRemoteStoreResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} From fbe93d4743e37033c211fff4fbf07fa71b5b66d9 Mon Sep 17 00:00:00 2001 From: Sruti Parthiban Date: Thu, 4 Aug 2022 22:05:23 -0700 Subject: [PATCH 067/104] Task consumer Integration (#2293) * Integrate task consumers to capture task resource information during unregister. Add consumer that logs topN expensive search tasks Signed-off-by: sruti1312 --- .../src/docker/config/log4j2.properties | 10 ++ distribution/src/config/log4j2.properties | 37 ++++++ .../ImportLog4jPropertiesTaskTests.java | 2 +- .../test/resources/config/log4j2.properties | 35 ++++++ .../action/search/SearchShardTask.java | 21 ++++ .../common/settings/ClusterSettings.java | 4 +- .../search/internal/ShardSearchRequest.java | 16 ++- .../search/query/QuerySearchRequest.java | 5 +- .../org/opensearch/tasks/TaskManager.java | 43 +++++++ .../SearchShardTaskDetailsLogMessage.java | 67 +++++++++++ .../tasks/consumer/TopNSearchTasksLogger.java | 100 +++++++++++++++++ .../tasks/consumer/package-info.java | 12 ++ .../transport/TransportService.java | 15 ++- .../node/tasks/TaskManagerTestCase.java | 10 +- .../index/IndexingSlowLogTests.java | 2 +- .../opensearch/index/SearchSlowLogTests.java | 2 +- ...SearchShardTaskDetailsLogMessageTests.java | 61 ++++++++++ .../consumer/TopNSearchTasksLoggerTests.java | 105 ++++++++++++++++++ ...enSearchIndexLevelReplicationTestCase.java | 2 + .../test/transport/MockTransportService.java | 10 +- 20 files changed, 545 insertions(+), 14 deletions(-) create mode 100644 server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java create mode 100644 server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java create mode 100644 server/src/main/java/org/opensearch/tasks/consumer/package-info.java create mode 100644 server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java create mode 100644 server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index a8c54137c7fd2..761478a9fdc6e 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -53,3 +53,13 @@ logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace 
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling logger.index_indexing_slowlog.additivity = false + +appender.task_detailslog_rolling.type = Console +appender.task_detailslog_rolling.name = task_detailslog_rolling +appender.task_detailslog_rolling.layout.type = OpenSearchJsonLayout +appender.task_detailslog_rolling.layout.type_name = task_detailslog + +logger.task_detailslog_rolling.name = task.detailslog +logger.task_detailslog_rolling.level = trace +logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling +logger.task_detailslog_rolling.additivity = false diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 4820396c79eb7..bb27aaf2e22e6 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -195,3 +195,40 @@ logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old logger.index_indexing_slowlog.additivity = false + +######## Task details log JSON #################### +appender.task_detailslog_rolling.type = RollingFile +appender.task_detailslog_rolling.name = task_detailslog_rolling +appender.task_detailslog_rolling.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog.json +appender.task_detailslog_rolling.filePermissions = rw-r----- +appender.task_detailslog_rolling.layout.type = OpenSearchJsonLayout +appender.task_detailslog_rolling.layout.type_name = task_detailslog +appender.task_detailslog_rolling.layout.opensearchmessagefields=taskId,type,action,description,start_time_millis,resource_stats,metadata + +appender.task_detailslog_rolling.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog-%i.json.gz +appender.task_detailslog_rolling.policies.type = Policies +appender.task_detailslog_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.task_detailslog_rolling.policies.size.size = 1GB +appender.task_detailslog_rolling.strategy.type = DefaultRolloverStrategy +appender.task_detailslog_rolling.strategy.max = 4 +################################################# +######## Task details log - old style pattern #### +appender.task_detailslog_rolling_old.type = RollingFile +appender.task_detailslog_rolling_old.name = task_detailslog_rolling_old +appender.task_detailslog_rolling_old.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog.log +appender.task_detailslog_rolling_old.filePermissions = rw-r----- +appender.task_detailslog_rolling_old.layout.type = PatternLayout +appender.task_detailslog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.task_detailslog_rolling_old.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_task_detailslog-%i.log.gz +appender.task_detailslog_rolling_old.policies.type = Policies +appender.task_detailslog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.task_detailslog_rolling_old.policies.size.size = 1GB +appender.task_detailslog_rolling_old.strategy.type = DefaultRolloverStrategy +appender.task_detailslog_rolling_old.strategy.max = 4 
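+# Both task_detailslog appenders above feed the single "task.detailslog" logger declared
+# below; raising its level above trace silences task-detail logging at the log4j layer,
+# independent of the dynamic task_resource_consumers.enabled cluster setting.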
+################################################# +logger.task_detailslog_rolling.name = task.detailslog +logger.task_detailslog_rolling.level = trace +logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling +logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling_old.ref = task_detailslog_rolling_old +logger.task_detailslog_rolling.additivity = false diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java index 7f67e08c66b9e..96544d3297ad4 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java @@ -67,7 +67,7 @@ public void testImportLog4jPropertiesTask() throws IOException { Properties properties = new Properties(); properties.load(Files.newInputStream(taskInput.getOpenSearchConfig().resolve(ImportLog4jPropertiesTask.LOG4J_PROPERTIES))); assertThat(properties, is(notNullValue())); - assertThat(properties.entrySet(), hasSize(137)); + assertThat(properties.entrySet(), hasSize(165)); assertThat(properties.get("appender.rolling.layout.type"), equalTo("OpenSearchJsonLayout")); assertThat( properties.get("appender.deprecation_rolling.fileName"), diff --git a/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties b/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties index b9ad71121165a..4b92d3fc62376 100644 --- a/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties +++ b/distribution/tools/upgrade-cli/src/test/resources/config/log4j2.properties @@ -176,3 +176,38 @@ logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old logger.index_indexing_slowlog.additivity = false + +######## Task details log JSON #################### +appender.task_detailslog_rolling.type = RollingFile +appender.task_detailslog_rolling.name = task_detailslog_rolling +appender.task_detailslog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog.json +appender.task_detailslog_rolling.layout.type = ESJsonLayout +appender.task_detailslog_rolling.layout.type_name = task_detailslog +appender.task_detailslog_rolling.layout.esmessagefields=taskId,type,action,description,start_time_millis,resource_stats,metadata + +appender.task_detailslog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog-%i.json.gz +appender.task_detailslog_rolling.policies.type = Policies +appender.task_detailslog_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.task_detailslog_rolling.policies.size.size = 1GB +appender.task_detailslog_rolling.strategy.type = DefaultRolloverStrategy +appender.task_detailslog_rolling.strategy.max = 4 +################################################# +######## Task details log - old style pattern #### +appender.task_detailslog_rolling_old.type = RollingFile +appender.task_detailslog_rolling_old.name = task_detailslog_rolling_old +appender.task_detailslog_rolling_old.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog.log +appender.task_detailslog_rolling_old.layout.type = PatternLayout +appender.task_detailslog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.task_detailslog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_task_detailslog-%i.log.gz +appender.task_detailslog_rolling_old.policies.type = Policies +appender.task_detailslog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.task_detailslog_rolling_old.policies.size.size = 1GB +appender.task_detailslog_rolling_old.strategy.type = DefaultRolloverStrategy +appender.task_detailslog_rolling_old.strategy.max = 4 +################################################# +logger.task_detailslog_rolling.name = task.detailslog +logger.task_detailslog_rolling.level = trace +logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling.ref = task_detailslog_rolling +logger.task_detailslog_rolling.appenderRef.task_detailslog_rolling_old.ref = task_detailslog_rolling_old +logger.task_detailslog_rolling.additivity = false diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index 57831896db714..c9d0d6e2d3d47 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -32,12 +32,14 @@ package org.opensearch.action.search; +import org.opensearch.common.MemoizedSupplier; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.TaskId; import java.util.Map; +import java.util.function.Supplier; /** * Task storing information about a currently running search shard request. 
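The hunk below wires the task metadata through a MemoizedSupplier; as a hedged standalone sketch of that pattern (the helper name renderExpensiveSource is invented for illustration):

    // MemoizedSupplier caches the first result, so a potentially large source string is
    // rendered at most once, and only if something actually asks for the task metadata.
    MemoizedSupplier<String> metadata = new MemoizedSupplier<>(() -> renderExpensiveSource());
    String first = metadata.get();   // computes and caches
    String second = metadata.get();  // returns the cached value without recomputing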
@@ -46,9 +48,28 @@ * @opensearch.internal */ public class SearchShardTask extends CancellableTask { + // generating metadata in a lazy way since source can be quite big + private final MemoizedSupplier metadataSupplier; public SearchShardTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { + this(id, type, action, description, parentTaskId, headers, () -> ""); + } + + public SearchShardTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map headers, + Supplier metadataSupplier + ) { super(id, type, action, description, parentTaskId, headers); + this.metadataSupplier = new MemoizedSupplier<>(metadataSupplier); + } + + public String getTaskMetadata() { + return metadataSupplier.get(); } @Override diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fe322992cd3e5..971fb518ff1da 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -41,6 +41,7 @@ import org.opensearch.index.ShardIndexingPressureMemoryManager; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; @@ -577,7 +578,8 @@ public void apply(Settings value, Settings current, Settings previous) { ShardIndexingPressureMemoryManager.SUCCESSFUL_REQUEST_ELAPSED_TIMEOUT, ShardIndexingPressureMemoryManager.MAX_OUTSTANDING_REQUESTS, IndexingPressure.MAX_INDEXING_BYTES, - TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED + TaskResourceTrackingService.TASK_RESOURCE_TRACKING_ENABLED, + TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED ) ) ); diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index a4d7b2bed516c..828c2f8c78d69 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -51,6 +51,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.ToXContent; import org.opensearch.index.Index; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; @@ -71,6 +72,7 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; +import java.util.Collections; import java.util.Arrays; import java.util.Map; import java.util.function.Function; @@ -85,6 +87,8 @@ * @opensearch.internal */ public class ShardSearchRequest extends TransportRequest implements IndicesRequest { + public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + private final String clusterAlias; private final ShardId shardId; private final int numberOfShards; @@ -501,7 +505,7 @@ public String getClusterAlias() { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new SearchShardTask(id, type, action, 
getDescription(), parentTaskId, headers); + return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers, this::getMetadataSupplier); } @Override @@ -510,6 +514,16 @@ public String getDescription() { return "shardId[" + shardId() + "]"; } + public String getMetadataSupplier() { + StringBuilder sb = new StringBuilder(); + if (source != null) { + sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + } else { + sb.append("source[]"); + } + return sb.toString(); + } + public Rewriteable getRewriteable() { return new RequestRewritable(this); } diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java b/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java index ae2f9e8fab989..ca74942decb50 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java @@ -123,7 +123,7 @@ public IndicesOptions indicesOptions() { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers, this::getMetadataSupplier); } public String getDescription() { @@ -137,4 +137,7 @@ public String getDescription() { return sb.toString(); } + public String getMetadataSupplier() { + return shardSearchRequest().getMetadataSupplier(); + } } diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index 01b6b8ea603bf..334cde81dfb6a 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -51,6 +51,8 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; @@ -58,6 +60,7 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.tasks.consumer.TopNSearchTasksLogger; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpChannel; @@ -75,6 +78,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -92,6 +96,15 @@ public class TaskManager implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); + public static final String TASK_RESOURCE_CONSUMERS_ATTRIBUTES = "task_resource_consumers.enabled"; + + public static final Setting TASK_RESOURCE_CONSUMERS_ENABLED = Setting.boolSetting( + TASK_RESOURCE_CONSUMERS_ATTRIBUTES, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + /** * Rest headers that are copied to the task */ @@ -116,10 +129,26 @@ public class TaskManager implements ClusterStateApplier { private final Map channelPendingTaskTrackers = ConcurrentCollections.newConcurrentMap(); 
private final SetOnce cancellationService = new SetOnce<>(); + private volatile boolean taskResourceConsumersEnabled; + private final Set> taskResourceConsumer; + + public static TaskManager createTaskManagerWithClusterSettings( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { + final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders); + clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_CONSUMERS_ENABLED, taskManager::setTaskResourceConsumersEnabled); + return taskManager; + } + public TaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { this.threadPool = threadPool; this.taskHeaders = new ArrayList<>(taskHeaders); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.taskResourceConsumersEnabled = TASK_RESOURCE_CONSUMERS_ENABLED.get(settings); + this.taskResourceConsumer = Set.of(new TopNSearchTasksLogger(settings)); } public void setTaskResultsService(TaskResultsService taskResultsService) { @@ -135,6 +164,10 @@ public void setTaskResourceTrackingService(TaskResourceTrackingService taskResou this.taskResourceTrackingService.set(taskResourceTrackingService); } + public void setTaskResourceConsumersEnabled(boolean taskResourceConsumersEnabled) { + this.taskResourceConsumersEnabled = taskResourceConsumersEnabled; + } + /** * Registers a task without parent task */ @@ -240,6 +273,16 @@ public Task unregister(Task task) { // Decrement the task's self-thread as part of unregistration. task.decrementResourceTrackingThreads(); + if (taskResourceConsumersEnabled) { + for (Consumer taskConsumer : taskResourceConsumer) { + try { + taskConsumer.accept(task); + } catch (Exception e) { + logger.error("error encountered when updating the consumer", e); + } + } + } + if (task instanceof CancellableTask) { CancellableTaskHolder holder = cancellableTasks.remove(task.getId()); if (holder != null) { diff --git a/server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java b/server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java new file mode 100644 index 0000000000000..1755db3ab4ae8 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessage.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks.consumer; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.logging.OpenSearchLogMessage; + +import java.util.HashMap; +import java.util.Map; + +/** + * Search shard task information that will be extracted from Task and converted into + * a format that will be logged + * + * @opensearch.internal + */ +public final class SearchShardTaskDetailsLogMessage extends OpenSearchLogMessage { + SearchShardTaskDetailsLogMessage(SearchShardTask task) { + super(prepareMap(task), message(task)); + } + + private static Map<String, Object> prepareMap(SearchShardTask task) { + Map<String, Object> messageFields = new HashMap<>(); + messageFields.put("taskId", task.getId()); + messageFields.put("type", task.getType()); + messageFields.put("action", task.getAction()); + messageFields.put("description", task.getDescription()); + messageFields.put("start_time_millis", task.getStartTime()); + messageFields.put("parentTaskId", task.getParentTaskId()); + messageFields.put("resource_stats", task.getResourceStats()); + messageFields.put("metadata", task.getTaskMetadata()); + return messageFields; + } + + // Message will be used in plaintext logs + private static String message(SearchShardTask task) { + StringBuilder sb = new StringBuilder(); + sb.append("taskId:[") + .append(task.getId()) + .append("], ") + .append("type:[") + .append(task.getType()) + .append("], ") + .append("action:[") + .append(task.getAction()) + .append("], ") + .append("description:[") + .append(task.getDescription()) + .append("], ") + .append("start_time_millis:[") + .append(task.getStartTime()) + .append("], ") + .append("resource_stats:[") + .append(task.getResourceStats()) + .append("], ") + .append("metadata:[") + .append(task.getTaskMetadata()) + .append("]"); + return sb.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java b/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java new file mode 100644 index 0000000000000..dd7e200d7f4b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.tasks.consumer; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.Task; + +import java.util.Comparator; +import java.util.PriorityQueue; +import java.util.Queue; +import java.util.function.Consumer; + +/** + * A simple listener that logs resource information of high memory consuming search tasks + * + * @opensearch.internal + */ +public class TopNSearchTasksLogger implements Consumer { + public static final String TASK_DETAILS_LOG_PREFIX = "task.detailslog"; + public static final String LOG_TOP_QUERIES_SIZE = "cluster.task.consumers.top_n.size"; + public static final String LOG_TOP_QUERIES_FREQUENCY = "cluster.task.consumers.top_n.frequency"; + + private static final Logger SEARCH_TASK_DETAILS_LOGGER = LogManager.getLogger(TASK_DETAILS_LOG_PREFIX + ".search"); + + // number of memory expensive search tasks that are logged + private static final Setting LOG_TOP_QUERIES_SIZE_SETTING = Setting.intSetting( + LOG_TOP_QUERIES_SIZE, + 10, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + // frequency in which memory expensive search tasks are logged + private static final Setting LOG_TOP_QUERIES_FREQUENCY_SETTING = Setting.timeSetting( + LOG_TOP_QUERIES_FREQUENCY, + TimeValue.timeValueSeconds(60L), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final int topQueriesSize; + private final long topQueriesLogFrequencyInNanos; + private final Queue> topQueries; + private long lastReportedTimeInNanos = System.nanoTime(); + + public TopNSearchTasksLogger(Settings settings) { + this.topQueriesSize = LOG_TOP_QUERIES_SIZE_SETTING.get(settings); + this.topQueriesLogFrequencyInNanos = LOG_TOP_QUERIES_FREQUENCY_SETTING.get(settings).getNanos(); + this.topQueries = new PriorityQueue<>(topQueriesSize, Comparator.comparingLong(Tuple::v1)); + } + + /** + * Called when task is unregistered and task has resource stats present. 
+ */ + @Override + public void accept(Task task) { + if (task instanceof SearchShardTask) { + recordSearchTask((SearchShardTask) task); + } + } + + private synchronized void recordSearchTask(SearchShardTask searchTask) { + final long memory_in_bytes = searchTask.getTotalResourceUtilization(ResourceStats.MEMORY); + if (System.nanoTime() - lastReportedTimeInNanos >= topQueriesLogFrequencyInNanos) { + publishTopNEvents(); + lastReportedTimeInNanos = System.nanoTime(); + } + if (topQueries.size() >= topQueriesSize && topQueries.peek().v1() < memory_in_bytes) { + // evict the element + topQueries.poll(); + } + if (topQueries.size() < topQueriesSize) { + topQueries.offer(new Tuple<>(memory_in_bytes, searchTask)); + } + } + + private void publishTopNEvents() { + logTopResourceConsumingQueries(); + topQueries.clear(); + } + + private void logTopResourceConsumingQueries() { + for (Tuple topQuery : topQueries) { + SEARCH_TASK_DETAILS_LOGGER.info(new SearchShardTaskDetailsLogMessage(topQuery.v2())); + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/consumer/package-info.java b/server/src/main/java/org/opensearch/tasks/consumer/package-info.java new file mode 100644 index 0000000000000..40219a1cead5b --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/consumer/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Support for adding consumers to consume task related information. + */ +package org.opensearch.tasks.consumer; diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 1a280f2475e5d..aaba06196bc59 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -210,7 +210,7 @@ public TransportService( setTracerLogInclude(TransportSettings.TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); - taskManager = createTaskManager(settings, threadPool, taskHeaders); + taskManager = createTaskManager(settings, clusterSettings, threadPool, taskHeaders); this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); @@ -246,8 +246,17 @@ public TaskManager getTaskManager() { return taskManager; } - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { - return new TaskManager(settings, threadPool, taskHeaders); + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { + if (clusterSettings != null) { + return TaskManager.createTaskManagerWithClusterSettings(settings, clusterSettings, threadPool, taskHeaders); + } else { + return new TaskManager(settings, threadPool, taskHeaders); + } } /** diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index fd6f5d17a3a80..68cf69e30f8a6 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -53,6 +53,7 @@ import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.util.PageCacheRecycler; @@ -218,11 +219,16 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { Collections.emptySet() ) { @Override - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { return new MockTaskManager(settings, threadPool, taskHeaders); } else { - return super.createTaskManager(settings, threadPool, taskHeaders); + return super.createTaskManager(settings, clusterSettings, threadPool, taskHeaders); } } }; diff --git a/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java index 38c8491d79150..75a346e444b73 100644 --- a/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java @@ -86,8 +86,8 @@ public static void init() throws IllegalAccessException { @AfterClass public static void cleanup() { - appender.stop(); Loggers.removeAppender(testLogger1, appender); + appender.stop(); } public void testLevelPrecedence() { diff --git a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java index ae159092a4833..ea146ec20b16a 100644 --- a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java @@ -84,9 +84,9 @@ public static void init() throws IllegalAccessException { @AfterClass public static void cleanup() { - appender.stop(); Loggers.removeAppender(queryLog, appender); Loggers.removeAppender(fetchLog, appender); + appender.stop(); } @Override diff --git a/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java b/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java new file mode 100644 index 0000000000000..641fdef4891bd --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks.consumer; + +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.ResourceStatsType; +import org.opensearch.tasks.ResourceUsageMetric; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class SearchShardTaskDetailsLogMessageTests extends OpenSearchSingleNodeTestCase { + public void testTaskDetailsLogHasJsonFields() { + SearchShardTask task = new SearchShardTask( + 0, + "n/a", + "n/a", + "test", + null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"), + () -> "test_metadata" + ); + SearchShardTaskDetailsLogMessage p = new SearchShardTaskDetailsLogMessage(task); + + assertThat(p.getValueFor("taskId"), equalTo("0")); + assertThat(p.getValueFor("type"), equalTo("n/a")); + assertThat(p.getValueFor("action"), equalTo("n/a")); + assertThat(p.getValueFor("description"), equalTo("test")); + assertThat(p.getValueFor("parentTaskId"), equalTo(null)); + // when no resource information present + assertThat(p.getValueFor("resource_stats"), equalTo("{}")); + assertThat(p.getValueFor("metadata"), equalTo("test_metadata")); + + task.startThreadResourceTracking( + 0, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, 0L), + new ResourceUsageMetric(ResourceStats.CPU, 0L) + ); + task.updateThreadResourceStats( + 0, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, 100), + new ResourceUsageMetric(ResourceStats.CPU, 100) + ); + assertThat( + p.getValueFor("resource_stats"), + equalTo("{0=[{cpu_time_in_nanos=100, memory_in_bytes=100}, stats_type=worker_stats, is_active=true, threadId=0]}") + ); + } +} diff --git a/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java b/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java new file mode 100644 index 0000000000000..a8fd3623ef09d --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks.consumer; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.MockAppender; +import org.opensearch.common.settings.Settings; +import org.opensearch.tasks.ResourceStats; +import org.opensearch.tasks.ResourceStatsType; +import org.opensearch.tasks.ResourceUsageMetric; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Collections; + +import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_FREQUENCY; +import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_SIZE; + +public class TopNSearchTasksLoggerTests extends OpenSearchSingleNodeTestCase { + static MockAppender appender; + static Logger searchLogger = LogManager.getLogger(TopNSearchTasksLogger.TASK_DETAILS_LOG_PREFIX + ".search"); + + private TopNSearchTasksLogger topNSearchTasksLogger; + + @BeforeClass + public static void init() throws IllegalAccessException { + appender = new MockAppender("trace_appender"); + appender.start(); + Loggers.addAppender(searchLogger, appender); + } + + @AfterClass + public static void cleanup() { + Loggers.removeAppender(searchLogger, appender); + appender.stop(); + } + + public void testLoggerWithTasks() { + final Settings settings = Settings.builder().put(LOG_TOP_QUERIES_SIZE, 1).put(LOG_TOP_QUERIES_FREQUENCY, "0ms").build(); + topNSearchTasksLogger = new TopNSearchTasksLogger(settings); + generateTasks(5); + LogEvent logEvent = appender.getLastEventAndReset(); + assertNotNull(logEvent); + assertEquals(logEvent.getLevel(), Level.INFO); + assertTrue(logEvent.getMessage().getFormattedMessage().contains("cpu_time_in_nanos=300, memory_in_bytes=300")); + } + + public void testLoggerWithoutTasks() { + final Settings settings = Settings.builder().put(LOG_TOP_QUERIES_SIZE, 1).put(LOG_TOP_QUERIES_FREQUENCY, "500ms").build(); + topNSearchTasksLogger = new TopNSearchTasksLogger(settings); + + assertNull(appender.getLastEventAndReset()); + } + + public void testLoggerWithHighFrequency() { + // setting the frequency to a really large value and confirming that nothing gets written to the log file. + final Settings settings = Settings.builder().put(LOG_TOP_QUERIES_SIZE, 1).put(LOG_TOP_QUERIES_FREQUENCY, "10m").build(); + topNSearchTasksLogger = new TopNSearchTasksLogger(settings); + generateTasks(5); + generateTasks(2); + + assertNull(appender.getLastEventAndReset()); + }
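// For reference, a minimal sketch of the node settings that would enable this
// consumer outside of tests, assuming TASK_RESOURCE_CONSUMERS_ENABLED stays
// registered as the dynamic "task_resource_consumers.enabled" setting added to
// TaskManager in this patch; the size and frequency values are illustrative.
Settings nodeSettings = Settings.builder()
    .put(TaskManager.TASK_RESOURCE_CONSUMERS_ENABLED.getKey(), true)
    .put(LOG_TOP_QUERIES_SIZE, 5)          // keep the five most memory-expensive search tasks
    .put(LOG_TOP_QUERIES_FREQUENCY, "60s") // flush and log the queue at most once per minute
    .build();

+ + // Generates search tasks and updates the top-N search tasks logger consumer.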
+ public void generateTasks(int numberOfTasks) { + for (int i = 0; i < numberOfTasks; i++) { + Task task = new SearchShardTask( + i, + "n/a", + "n/a", + "test", + null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"), + () -> "n/a" + ); + task.startThreadResourceTracking( + i, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, 0L), + new ResourceUsageMetric(ResourceStats.CPU, 0L) + ); + task.updateThreadResourceStats( + i, + ResourceStatsType.WORKER_STATS, + new ResourceUsageMetric(ResourceStats.MEMORY, i * 100L), + new ResourceUsageMetric(ResourceStats.CPU, i * 100L) + ); + topNSearchTasksLogger.accept(task); + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index a588909085160..249ffcfd0bf6e 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -76,6 +76,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; @@ -201,6 +202,7 @@ protected class ReplicationGroup implements AutoCloseable, Iterable private final AtomicInteger docId = new AtomicInteger(); boolean closed = false; private volatile ReplicationTargets replicationTargets; + private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer( new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java index 9b9baebd540c3..c80b120ad0148 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java @@ -261,11 +261,16 @@ private static TransportAddress[] extractTransportAddresses(TransportService tra } @Override - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { + protected TaskManager createTaskManager( + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool, + Set taskHeaders + ) { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { return new MockTaskManager(settings, threadPool, taskHeaders); } else { - return super.createTaskManager(settings, threadPool, taskHeaders); + return super.createTaskManager(settings, clusterSettings, threadPool, taskHeaders); } } @@ -530,7 +535,6 @@ public void clearCallback() { /** * Adds a new handling behavior that is used when the defined request is received. 
- * */ public void addRequestHandlingBehavior( String actionName, From 24527fcbced32185f4659898f9e9cf7413c6a8f2 Mon Sep 17 00:00:00 2001 From: pranikum <109206473+pranikum@users.noreply.github.com> Date: Fri, 5 Aug 2022 23:02:20 +0530 Subject: [PATCH 068/104] Issue:3933 -Multi-node setup for local gradle run (#3958) * Issue:3933 - Multinode setup for local gradle run Signed-off-by: Pranit Kumar --- .../testclusters/OpenSearchCluster.java | 40 ++++++++++++++++--- .../gradle/testclusters/OpenSearchNode.java | 11 ++++- gradle/run.gradle | 5 +++ 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java index ef52adab6377a..0f5348d5a8dcf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java @@ -84,6 +84,8 @@ public class OpenSearchCluster implements TestClusterConfiguration, Named { private final ArchiveOperations archiveOperations; private int nodeIndex = 0; + private int zoneCount = 1; + public OpenSearchCluster( String clusterName, Project project, @@ -104,13 +106,21 @@ public OpenSearchCluster( this.bwcJdk = bwcJdk; // Always add the first node - addNode(clusterName + "-0"); + String zone = hasZoneProperty() ? "zone-1" : ""; + addNode(clusterName + "-0", zone); // configure the cluster name eagerly so all nodes know about it this.nodes.all((node) -> node.defaultConfig.put("cluster.name", safeName(clusterName))); addWaitForClusterHealth(); } + public void setNumberOfZones(int zoneCount) { + if (zoneCount < 1) { + throw new IllegalArgumentException("Number of zones should be >= 1 but was " + zoneCount + " for " + this); + } + this.zoneCount = zoneCount; + } + public void setNumberOfNodes(int numberOfNodes) { checkFrozen(); @@ -124,12 +134,31 @@ public void setNumberOfNodes(int numberOfNodes) { ); } - for (int i = nodes.size(); i < numberOfNodes; i++) { - addNode(clusterName + "-" + i); + if (numberOfNodes < zoneCount) { + throw new IllegalArgumentException( + "Number of nodes should be >= zoneCount but was " + numberOfNodes + " for " + this.zoneCount + ); } + + if (hasZoneProperty()) { + int currentZone; + for (int i = nodes.size(); i < numberOfNodes; i++) { + currentZone = i % zoneCount + 1; + String zoneName = "zone-" + currentZone; + addNode(clusterName + "-" + i, zoneName); + } + } else { + for (int i = nodes.size(); i < numberOfNodes; i++) { + addNode(clusterName + "-" + i, ""); + } + } + } + + private boolean hasZoneProperty() { + return this.project.findProperty("numZones") != null; } - private void addNode(String nodeName) { + private void addNode(String nodeName, String zoneName) { OpenSearchNode newNode = new OpenSearchNode( path, nodeName, @@ -138,7 +167,8 @@ private void addNode(String nodeName) { fileSystemOperations, archiveOperations, workingDirBase, - bwcJdk + bwcJdk, + zoneName ); // configure the cluster name eagerly newNode.defaultConfig.put("cluster.name", safeName(clusterName)); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java index b051c15e81d6d..ab765efde7885 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java @@ -32,6 +32,7 @@ package 
org.opensearch.gradle.testclusters; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang.StringUtils; import org.opensearch.gradle.Architecture; import org.opensearch.gradle.DistributionDownloadPlugin; import org.opensearch.gradle.OpenSearchDistribution; @@ -175,6 +176,8 @@ public class OpenSearchNode implements TestClusterConfiguration { private final Config legacyESConfig; private Config currentConfig; + private String zone; + OpenSearchNode( String path, String name, @@ -183,7 +186,8 @@ public class OpenSearchNode implements TestClusterConfiguration { FileSystemOperations fileSystemOperations, ArchiveOperations archiveOperations, File workingDirBase, - Jdk bwcJdk + Jdk bwcJdk, + String zone ) { this.path = path; this.name = name; @@ -205,6 +209,7 @@ public class OpenSearchNode implements TestClusterConfiguration { opensearchConfig = Config.getOpenSearchConfig(workingDir); legacyESConfig = Config.getLegacyESConfig(workingDir); currentConfig = opensearchConfig; + this.zone = zone; } /* @@ -1239,6 +1244,10 @@ private void createConfiguration() { baseConfig.put("path.logs", confPathLogs.toAbsolutePath().toString()); baseConfig.put("path.shared_data", workingDir.resolve("sharedData").toString()); baseConfig.put("node.attr.testattr", "test"); + if (StringUtils.isNotBlank(zone)) { + baseConfig.put("cluster.routing.allocation.awareness.attributes", "zone"); + baseConfig.put("node.attr.zone", zone); + } baseConfig.put("node.portsfile", "true"); baseConfig.put("http.port", httpPort); if (getVersion().onOrAfter(Version.fromString("6.7.0"))) { diff --git a/gradle/run.gradle b/gradle/run.gradle index 5a1fed06c0ef7..639479e97d28f 100644 --- a/gradle/run.gradle +++ b/gradle/run.gradle @@ -31,9 +31,14 @@ import org.opensearch.gradle.testclusters.RunTask apply plugin: 'opensearch.testclusters' +def numNodes = findProperty('numNodes') as Integer ?: 1 +def numZones = findProperty('numZones') as Integer ?: 1 + testClusters { runTask { testDistribution = 'archive' + if (numZones > 1) numberOfZones = numZones + if (numNodes > 1) numberOfNodes = numNodes } } From 203f44e94cb4f04077ae0d0c800bb48b3852252b Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 5 Aug 2022 14:25:44 -0400 Subject: [PATCH 069/104] OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected (#4143) Signed-off-by: Andriy Redko --- .../search/AbstractSearchAsyncAction.java | 11 +++- .../AbstractSearchAsyncActionTests.java | 51 +++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 1597b31e89871..0876bf93a557b 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -302,7 +302,16 @@ public void onFailure(Exception t) { * It is possible to run into connection exceptions here because we are getting the connection early and might * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. */ - fork(() -> onShardFailure(shardIndex, shard, shardIt, e)); + fork(() -> { + // It only happens when onPhaseDone() is called and executePhaseOnShard() fails hard with an exception. 
+ // In this case calling onShardFailure() would overflow the operations counter, so the best we could do + // here is to fail the phase and move on to the next one. + if (totalOps.get() == expectedTotalOps) { + onPhaseFailure(this, "The phase has failed", e); + } else { + onShardFailure(shardIndex, shard, shardIt, e); + } + }); } finally { executeNext(pendingExecutions, thread); } diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index b44b59b8a4ad5..ad2657517df9a 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -477,6 +477,57 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testExecutePhaseOnShardFailure() throws InterruptedException { + final Index index = new Index("test", UUID.randomUUID().toString()); + + final SearchShardIterator[] shards = IntStream.range(0, 2 + randomInt(3)) + .mapToObj(i -> new SearchShardIterator(null, new ShardId(index, i), List.of("n1", "n2", "n3"), null, null, null)) + .toArray(SearchShardIterator[]::new); + + final AtomicBoolean fail = new AtomicBoolean(true); + final CountDownLatch latch = new CountDownLatch(1); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.setMaxConcurrentShardRequests(5); + + final ArraySearchPhaseResults queryResult = new ArraySearchPhaseResults<>(shards.length); + AbstractSearchAsyncAction action = createAction( + searchRequest, + queryResult, + new ActionListener() { + @Override + public void onResponse(SearchResponse response) {} + + @Override + public void onFailure(Exception e) { + try { + // We end up here only when onPhaseDone() is called (causing NPE) and + // ending up in the onPhaseFailure() callback + if (fail.compareAndExchange(true, false)) { + assertThat(e, instanceOf(SearchPhaseExecutionException.class)); + throw new RuntimeException("Simulated exception"); + } + } finally { + executor.submit(() -> latch.countDown()); + } + } + }, + false, + false, + new AtomicLong(), + shards + ); + action.run(); + assertTrue(latch.await(1, TimeUnit.SECONDS)); + + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, action.buildShardFailures(), null, null); + assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations()); + assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest()); + assertSame(searchResponse.getProfileResults(), internalSearchResponse.profile()); + assertSame(searchResponse.getHits(), internalSearchResponse.hits()); + assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); + } + private static final class PhaseResult extends SearchPhaseResult { PhaseResult(ShardSearchContextId contextId) { this.contextId = contextId; From c665e7c5f202074ff7f910431302d0aeab4adb90 Mon Sep 17 00:00:00 2001 From: Kartik Ganesh Date: Fri, 5 Aug 2022 15:47:30 -0700 Subject: [PATCH 070/104] 2.2.0 release notes (#4151) (#4153) * Added rough release notes for 2.2 This includes an inline sed command to rewrite PR ids to full Github URLs. 
Command - git log --pretty=format:"* %s" --since=6-24-2022 | sed -E "s|(\(#([0-9]*)\))|([#\2](https://github.com/opensearch-project/opensearch/pull/\2))|g" Signed-off-by: Kartik Ganesh * Organizing 2.2.0 release notes into sections Signed-off-by: Kartik Ganesh * Removed lines from changelog that were already in 2.1.0 Signed-off-by: Kartik Ganesh * Adding one last commit to release notes Signed-off-by: Kartik Ganesh --- .../opensearch.release-notes-2.2.0.md | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.2.0.md diff --git a/release-notes/opensearch.release-notes-2.2.0.md b/release-notes/opensearch.release-notes-2.2.0.md new file mode 100644 index 0000000000000..74e76cfe46b5a --- /dev/null +++ b/release-notes/opensearch.release-notes-2.2.0.md @@ -0,0 +1,79 @@ +## 2022-08-05 Version 2.2.0 Release Notes + +### Features/Enhancements + +* Task consumer Integration ([#2293](https://github.com/opensearch-project/opensearch/pull/2293)) ([#4141](https://github.com/opensearch-project/opensearch/pull/4141)) +* [Backport 2.x] [Segment Replication] Add SegmentReplicationTargetService to orchestrate replication events. ([#4074](https://github.com/opensearch-project/opensearch/pull/4074)) +* Support task resource tracking in OpenSearch ([#3982](https://github.com/opensearch-project/opensearch/pull/3982)) ([#4087](https://github.com/opensearch-project/opensearch/pull/4087)) +* Making shard copy count a multiple of attribute count ([#3462](https://github.com/opensearch-project/opensearch/pull/3462)) ([#4086](https://github.com/opensearch-project/opensearch/pull/4086)) +* [Backport 2.x] [Segment Rreplication] Adding CheckpointRefreshListener to trigger when Segment replication is turned on and Primary shard refreshes ([#4044](https://github.com/opensearch-project/opensearch/pull/4044)) +* Add doc_count field mapper ([#3985](https://github.com/opensearch-project/opensearch/pull/3985)) ([#4037](https://github.com/opensearch-project/opensearch/pull/4037)) +* Parallelize stale blobs deletion during snapshot delete ([#3796](https://github.com/opensearch-project/opensearch/pull/3796)) ([#3990](https://github.com/opensearch-project/opensearch/pull/3990)) +* [Backport 2.x] [Segment Replication] Add a new Engine implementation for replicas with segment replication enabled. 
([#4003](https://github.com/opensearch-project/opensearch/pull/4003)) +* [Backport 2.x] Adds a new parameter, max_analyzer_offset, for the highlighter ([#4031](https://github.com/opensearch-project/opensearch/pull/4031)) +* Update merge on refresh and merge on commit defaults in Opensearch (Lucene 9.3) ([#3561](https://github.com/opensearch-project/opensearch/pull/3561)) ([#4013](https://github.com/opensearch-project/opensearch/pull/4013)) +* Make HybridDirectory MMAP Extensions Configurable ([#3837](https://github.com/opensearch-project/opensearch/pull/3837)) ([#3970](https://github.com/opensearch-project/opensearch/pull/3970)) +* Add option to disable chunked transfer-encoding ([#3864](https://github.com/opensearch-project/opensearch/pull/3864)) ([#3885](https://github.com/opensearch-project/opensearch/pull/3885)) +* Introducing TranslogManager implementations decoupled from the Engine [2.x] ([#3820](https://github.com/opensearch-project/opensearch/pull/3820)) +* Changing default no_master_block from write to metadata_write ([#3621](https://github.com/opensearch-project/opensearch/pull/3621)) ([#3756](https://github.com/opensearch-project/opensearch/pull/3756)) + +### Bug Fixes + +* OpenSearch crashes on closed client connection before search reply when total ops higher compared to expected ([#4143](https://github.com/opensearch-project/opensearch/pull/4143)) ([#4145](https://github.com/opensearch-project/opensearch/pull/4145)) +* Binding empty instance of SegmentReplicationCheckpointPublisher when Feature Flag is off in IndicesModule.java file. ([#4119](https://github.com/opensearch-project/opensearch/pull/4119)) +* Fix the bug that masterOperation(with task param) is bypassed ([#4103](https://github.com/opensearch-project/opensearch/pull/4103)) ([#4115](https://github.com/opensearch-project/opensearch/pull/4115)) +* Fixing flaky org.opensearch.cluster.routing.allocation.decider.DiskThresholdDeciderIT.testHighWatermarkNotExceeded test case ([#4012](https://github.com/opensearch-project/opensearch/pull/4012)) ([#4014](https://github.com/opensearch-project/opensearch/pull/4014)) +* Correct typo: Rutime -> Runtime ([#3896](https://github.com/opensearch-project/opensearch/pull/3896)) ([#3898](https://github.com/opensearch-project/opensearch/pull/3898)) +* Fixing implausibly old time stamp 1970-01-01 00:00:00 by using the timestamp from the Git revision instead of default 0 value ([#3883](https://github.com/opensearch-project/opensearch/pull/3883)) ([#3891](https://github.com/opensearch-project/opensearch/pull/3891)) + +### Infrastructure + +* Correctly ignore depandabot branches during push ([#4077](https://github.com/opensearch-project/opensearch/pull/4077)) ([#4113](https://github.com/opensearch-project/opensearch/pull/4113)) +* Build performance improvements ([#3926](https://github.com/opensearch-project/opensearch/pull/3926)) ([#3937](https://github.com/opensearch-project/opensearch/pull/3937)) +* PR coverage requirement and default settings ([#3931](https://github.com/opensearch-project/opensearch/pull/3931)) ([#3938](https://github.com/opensearch-project/opensearch/pull/3938)) +* [Backport 2.x] Fail build on wildcard imports ([#3940](https://github.com/opensearch-project/opensearch/pull/3940)) +* Don't run EmptyDirTaskTests in a Docker container ([#3792](https://github.com/opensearch-project/opensearch/pull/3792)) ([#3912](https://github.com/opensearch-project/opensearch/pull/3912)) +* Add coverage, gha, jenkins server, documentation and forum badges 
([#3886](https://github.com/opensearch-project/opensearch/pull/3886)) +* Unable to use Systemd module with tar distribution ([#3755](https://github.com/opensearch-project/opensearch/pull/3755)) ([#3903](https://github.com/opensearch-project/opensearch/pull/3903)) +* Ignore backport / autocut / dependentbot branches for gradle checks ([#3816](https://github.com/opensearch-project/opensearch/pull/3816)) ([#3825](https://github.com/opensearch-project/opensearch/pull/3825)) +* Setup branch push coverage and fix coverage uploads ([#3793](https://github.com/opensearch-project/opensearch/pull/3793)) ([#3811](https://github.com/opensearch-project/opensearch/pull/3811)) +* Enable XML test reports for Jenkins integration ([#3799](https://github.com/opensearch-project/opensearch/pull/3799)) ([#3803](https://github.com/opensearch-project/opensearch/pull/3803)) + +### Maintenance + +* OpenJDK Update (July 2022 Patch releases) ([#4023](https://github.com/opensearch-project/opensearch/pull/4023)) ([#4092](https://github.com/opensearch-project/opensearch/pull/4092)) +* Update to Lucene 9.3.0 ([#4043](https://github.com/opensearch-project/opensearch/pull/4043)) ([#4088](https://github.com/opensearch-project/opensearch/pull/4088)) +* Bump commons-configuration2 from 2.7 to 2.8.0 in /plugins/repository-hdfs ([#3764](https://github.com/opensearch-project/opensearch/pull/3764)) ([#3783](https://github.com/opensearch-project/opensearch/pull/3783)) +* Use bash in systemd-entrypoint shebang ([#4008](https://github.com/opensearch-project/opensearch/pull/4008)) ([#4009](https://github.com/opensearch-project/opensearch/pull/4009)) +* Bump com.gradle.enterprise from 3.10.1 to 3.10.2 ([#3568](https://github.com/opensearch-project/opensearch/pull/3568)) ([#3934](https://github.com/opensearch-project/opensearch/pull/3934)) +* Bump log4j-core in /buildSrc/src/testKit/thirdPartyAudit/sample_jars ([#3763](https://github.com/opensearch-project/opensearch/pull/3763)) ([#3784](https://github.com/opensearch-project/opensearch/pull/3784)) +* Added bwc version 1.3.5 ([#3911](https://github.com/opensearch-project/opensearch/pull/3911)) ([#3913](https://github.com/opensearch-project/opensearch/pull/3913)) +* Update to Gradle 7.5 ([#3594](https://github.com/opensearch-project/opensearch/pull/3594)) ([#3904](https://github.com/opensearch-project/opensearch/pull/3904)) +* Update Netty to 4.1.79.Final ([#3868](https://github.com/opensearch-project/opensearch/pull/3868)) ([#3874](https://github.com/opensearch-project/opensearch/pull/3874)) +* Upgrade MinIO image version ([#3541](https://github.com/opensearch-project/opensearch/pull/3541)) ([#3867](https://github.com/opensearch-project/opensearch/pull/3867)) +* Add netty-transport-native-unix-common to modules/transport-netty4/bu… ([#3848](https://github.com/opensearch-project/opensearch/pull/3848)) ([#3853](https://github.com/opensearch-project/opensearch/pull/3853)) +* Update outdated dependencies ([#3821](https://github.com/opensearch-project/opensearch/pull/3821)) ([#3854](https://github.com/opensearch-project/opensearch/pull/3854)) +* Added bwc version 2.1.1 ([#3806](https://github.com/opensearch-project/opensearch/pull/3806)) +* Upgrade netty from 4.1.73.Final to 4.1.78.Final ([#3772](https://github.com/opensearch-project/opensearch/pull/3772)) ([#3778](https://github.com/opensearch-project/opensearch/pull/3778)) +* Bump protobuf-java from 3.21.1 to 3.21.2 in /plugins/repository-hdfs ([#3711](https://github.com/opensearch-project/opensearch/pull/3711)) 
([#3726](https://github.com/opensearch-project/opensearch/pull/3726)) +* Upgrading AWS SDK dependency for native plugins ([#3694](https://github.com/opensearch-project/opensearch/pull/3694)) ([#3701](https://github.com/opensearch-project/opensearch/pull/3701)) + +### Refactoring + +* [Backport 2.x] Changes to encapsulate Translog into TranslogManager ([#4095](https://github.com/opensearch-project/opensearch/pull/4095)) ([#4142](https://github.com/opensearch-project/opensearch/pull/4142)) +* Deprecate and rename abstract methods in interfaces that contain 'master' in name ([#4121](https://github.com/opensearch-project/opensearch/pull/4121)) ([#4123](https://github.com/opensearch-project/opensearch/pull/4123)) +* [Backport 2.x] Integrate Engine with decoupled Translog interfaces ([#3822](https://github.com/opensearch-project/opensearch/pull/3822)) +* Deprecate class FakeThreadPoolMasterService, BlockMasterServiceOnMaster and BusyMasterServiceDisruption ([#4058](https://github.com/opensearch-project/opensearch/pull/4058)) ([#4068](https://github.com/opensearch-project/opensearch/pull/4068)) +* Rename classes with name 'MasterService' to 'ClusterManagerService' in directory 'test/framework' ([#4051](https://github.com/opensearch-project/opensearch/pull/4051)) ([#4057](https://github.com/opensearch-project/opensearch/pull/4057)) +* Deprecate class 'MasterService' and create alternative class 'ClusterManagerService' ([#4022](https://github.com/opensearch-project/opensearch/pull/4022)) ([#4050](https://github.com/opensearch-project/opensearch/pull/4050)) +* Deprecate and Rename abstract methods from 'Master' terminology to 'ClusterManager'. ([#4032](https://github.com/opensearch-project/opensearch/pull/4032)) ([#4048](https://github.com/opensearch-project/opensearch/pull/4048)) +* Deprecate public methods and variables that contain 'master' terminology in class 'NoMasterBlockService' and 'MasterService' ([#4006](https://github.com/opensearch-project/opensearch/pull/4006)) ([#4038](https://github.com/opensearch-project/opensearch/pull/4038)) +* Deprecate public methods and variables that contain 'master' terminology in 'client' directory ([#3966](https://github.com/opensearch-project/opensearch/pull/3966)) ([#3981](https://github.com/opensearch-project/opensearch/pull/3981)) +* [segment replication]Introducing common Replication interfaces for segment replication and recovery code paths ([#3234](https://github.com/opensearch-project/opensearch/pull/3234)) ([#3984](https://github.com/opensearch-project/opensearch/pull/3984)) +* Deprecate public methods and variables that contain 'master' terminology in 'test/framework' directory ([#3978](https://github.com/opensearch-project/opensearch/pull/3978)) ([#3987](https://github.com/opensearch-project/opensearch/pull/3987)) +* [Backport 2.x] [Segment Replication] Moving RecoveryState.Index to a top-level class and renaming ([#3971](https://github.com/opensearch-project/opensearch/pull/3971)) +* Rename and deprecate public methods that contains 'master' in the name in 'server' directory ([#3647](https://github.com/opensearch-project/opensearch/pull/3647)) ([#3964](https://github.com/opensearch-project/opensearch/pull/3964)) +* [2.x] Deprecate public class names with master terminology ([#3871](https://github.com/opensearch-project/opensearch/pull/3871)) ([#3914](https://github.com/opensearch-project/opensearch/pull/3914)) +* [Backport 2.x] Rename public classes with 'Master' to 'ClusterManager' 
([#3870](https://github.com/opensearch-project/opensearch/pull/3870)) +* Revert renaming masterOperation() to clusterManagerOperation() ([#3681](https://github.com/opensearch-project/opensearch/pull/3681)) ([#3714](https://github.com/opensearch-project/opensearch/pull/3714)) +* Revert renaming method onMaster() and offMaster() in interface LocalNodeMasterListener ([#3686](https://github.com/opensearch-project/opensearch/pull/3686)) ([#3693](https://github.com/opensearch-project/opensearch/pull/3693)) From 6993ac9834d0f7e8e77e6cade09de2245d2a20f7 Mon Sep 17 00:00:00 2001 From: Movva Ajaykumar Date: Mon, 8 Aug 2022 17:23:40 +0530 Subject: [PATCH 071/104] Added Point In Time Node Stats API ServiceLayer Changes (#4030) * Adds Node Stats api changes for the Point in time Signed-off-by: Ajay Kumar Movva --- .../index/search/stats/SearchStats.java | 56 +++++++++++++++++++ .../index/search/stats/ShardSearchStats.java | 17 ++++++ .../index/search/stats/SearchStatsTests.java | 9 ++- .../search/CreatePitSingleNodeTests.java | 39 +++++++++++++ .../search/DeletePitMultiNodeTests.java | 24 ++++++++ .../opensearch/search/PitMultiNodeTests.java | 18 ++++++ .../opensearch/search/SearchServiceTests.java | 17 +++++- 7 files changed, 175 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index fe23000902608..38b18355fd98d 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.search.stats; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -77,6 +78,10 @@ public static class Stats implements Writeable, ToXContentFragment { private long suggestTimeInMillis; private long suggestCurrent; + private long pitCount; + private long pitTimeInMillis; + private long pitCurrent; + private Stats() { // for internal use, initializes all counts to 0 } @@ -91,6 +96,9 @@ public Stats( long scrollCount, long scrollTimeInMillis, long scrollCurrent, + long pitCount, + long pitTimeInMillis, + long pitCurrent, long suggestCount, long suggestTimeInMillis, long suggestCurrent @@ -110,6 +118,10 @@ public Stats( this.suggestCount = suggestCount; this.suggestTimeInMillis = suggestTimeInMillis; this.suggestCurrent = suggestCurrent; + + this.pitCount = pitCount; + this.pitTimeInMillis = pitTimeInMillis; + this.pitCurrent = pitCurrent; } private Stats(StreamInput in) throws IOException { @@ -128,6 +140,12 @@ private Stats(StreamInput in) throws IOException { suggestCount = in.readVLong(); suggestTimeInMillis = in.readVLong(); suggestCurrent = in.readVLong(); + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + pitCount = in.readVLong(); + pitTimeInMillis = in.readVLong(); + pitCurrent = in.readVLong(); + } } public void add(Stats stats) { @@ -146,6 +164,10 @@ public void add(Stats stats) { suggestCount += stats.suggestCount; suggestTimeInMillis += stats.suggestTimeInMillis; suggestCurrent += stats.suggestCurrent; + + pitCount += stats.pitCount; + pitTimeInMillis += stats.pitTimeInMillis; + pitCurrent += stats.pitCurrent; } public void addForClosingShard(Stats stats) { @@ -162,6 +184,10 @@ public void addForClosingShard(Stats stats) { suggestCount += stats.suggestCount; suggestTimeInMillis += stats.suggestTimeInMillis; + + pitCount += 
stats.pitCount; + pitTimeInMillis += stats.pitTimeInMillis; + pitCurrent += stats.pitCurrent; } public long getQueryCount() { @@ -212,6 +238,22 @@ public long getScrollCurrent() { return scrollCurrent; } + public long getPitCount() { + return pitCount; + } + + public TimeValue getPitTime() { + return new TimeValue(pitTimeInMillis); + } + + public long getPitTimeInMillis() { + return pitTimeInMillis; + } + + public long getPitCurrent() { + return pitCurrent; + } + public long getSuggestCount() { return suggestCount; } @@ -249,6 +291,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(suggestCount); out.writeVLong(suggestTimeInMillis); out.writeVLong(suggestCurrent); + + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(pitCount); + out.writeVLong(pitTimeInMillis); + out.writeVLong(pitCurrent); + } } @Override @@ -265,6 +313,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SCROLL_TIME_IN_MILLIS, Fields.SCROLL_TIME, getScrollTime()); builder.field(Fields.SCROLL_CURRENT, scrollCurrent); + builder.field(Fields.PIT_TOTAL, pitCount); + builder.humanReadableField(Fields.PIT_TIME_IN_MILLIS, Fields.PIT_TIME, getPitTime()); + builder.field(Fields.PIT_CURRENT, pitCurrent); + builder.field(Fields.SUGGEST_TOTAL, suggestCount); builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); @@ -385,6 +437,10 @@ static final class Fields { static final String SCROLL_TIME = "scroll_time"; static final String SCROLL_TIME_IN_MILLIS = "scroll_time_in_millis"; static final String SCROLL_CURRENT = "scroll_current"; + static final String PIT_TOTAL = "point_in_time_total"; + static final String PIT_TIME = "point_in_time_time"; + static final String PIT_TIME_IN_MILLIS = "point_in_time_time_in_millis"; + static final String PIT_CURRENT = "point_in_time_current"; static final String SUGGEST_TOTAL = "suggest_total"; static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 3ef3571c75e59..6d0eb3a5949ca 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -187,6 +187,18 @@ public void onFreeScrollContext(ReaderContext readerContext) { totalStats.scrollMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); } + @Override + public void onNewPitContext(ReaderContext readerContext) { + totalStats.pitCurrent.inc(); + } + + @Override + public void onFreePitContext(ReaderContext readerContext) { + totalStats.pitCurrent.dec(); + assert totalStats.pitCurrent.count() >= 0; + totalStats.pitMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); + } + /** * Holder of statistics values * @@ -203,10 +215,12 @@ static final class StatsHolder { * for one-thousand times as long (i.e., scrolls that execute for almost twelve days on average). 
*/ final MeanMetric scrollMetric = new MeanMetric(); + final MeanMetric pitMetric = new MeanMetric(); final MeanMetric suggestMetric = new MeanMetric(); final CounterMetric queryCurrent = new CounterMetric(); final CounterMetric fetchCurrent = new CounterMetric(); final CounterMetric scrollCurrent = new CounterMetric(); + final CounterMetric pitCurrent = new CounterMetric(); final CounterMetric suggestCurrent = new CounterMetric(); SearchStats.Stats stats() { @@ -220,6 +234,9 @@ SearchStats.Stats stats() { scrollMetric.count(), TimeUnit.MICROSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count(), + pitMetric.count(), + TimeUnit.MICROSECONDS.toMillis(pitMetric.sum()), + pitCurrent.count(), suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), suggestCurrent.count() diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 4682d35411b78..7d2d8e38d066e 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -45,9 +45,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map groupStats1 = new HashMap<>(); Map groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking group stats are correct searchStats1.add(searchStats2); @@ -75,6 +75,9 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getScrollCount()); assertEquals(equalTo, stats.getScrollTimeInMillis()); assertEquals(equalTo, stats.getScrollCurrent()); + assertEquals(equalTo, stats.getPitCount()); + assertEquals(equalTo, stats.getPitTimeInMillis()); + assertEquals(equalTo, stats.getPitCurrent()); assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index b28423c3a8657..b730dc01c4871 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -24,6 +24,9 @@ import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -74,7 +77,11 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti SearchService service = getInstanceFromNode(SearchService.class); assertEquals(2, 
service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test + validatePitStats("index", 0, 1, 0); + validatePitStats("index", 0, 1, 1); } public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, InterruptedException { @@ -91,7 +98,12 @@ public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, PitTestsUtil.assertUsingGetAllPits(client(), response.getId(), response.getCreationTime()); assertEquals(4, response.getSuccessfulShards()); assertEquals(4, service.getActiveContexts()); + + validatePitStats("index", 1, 0, 0); + validatePitStats("index1", 1, 0, 0); service.doClose(); + validatePitStats("index", 0, 1, 0); + validatePitStats("index1", 0, 1, 0); } public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, InterruptedException { @@ -112,7 +124,11 @@ public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, I SearchService service = getInstanceFromNode(SearchService.class); assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); service.doClose(); + validatePitStats("index", 0, 1, 0); + validatePitStats("index", 0, 1, 1); } public void testCreatePITWithNonExistentIndex() { @@ -198,6 +214,9 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); SearchService service = getInstanceFromNode(SearchService.class); assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); + client().admin().indices().prepareClose("index").get(); SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { SearchResponse searchResponse = client().prepareSearch() @@ -246,7 +265,10 @@ public void testMaxOpenPitContexts() throws Exception { + "This limit can be set by changing the [search.max_open_pit_context] setting." 
) ); + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); service.doClose(); + validatePitStats("index", 0, maxPitContexts, 0); } public void testOpenPitContextsConcurrently() throws Exception { @@ -292,7 +314,9 @@ public void testOpenPitContextsConcurrently() throws Exception { thread.join(); } assertThat(service.getActiveContexts(), equalTo(maxPitContexts)); + validatePitStats("index", maxPitContexts, 0, 0); service.doClose(); + validatePitStats("index", 0, maxPitContexts, 0); } /** @@ -461,9 +485,11 @@ public void testPitAfterUpdateIndex() throws Exception { .getTotalHits().value, Matchers.equalTo(0L) ); + validatePitStats("test", 1, 0, 0); } finally { service.doClose(); assertEquals(0, service.getActiveContexts()); + validatePitStats("test", 0, 1, 0); PitTestsUtil.assertGetAllPitsEmpty(client()); } } @@ -505,8 +531,21 @@ public void testConcurrentSearches() throws Exception { SearchService service = getInstanceFromNode(SearchService.class); assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); service.doClose(); assertEquals(0, service.getActiveContexts()); + validatePitStats("index", 0, 1, 0); + validatePitStats("index", 0, 1, 1); PitTestsUtil.assertGetAllPitsEmpty(client()); } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException, + InterruptedException { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex(index)); + IndexShard indexShard = indexService.getShard(shardId); + assertEquals(expectedPitCurrent, indexShard.searchStats().getTotal().getPitCurrent()); + assertEquals(expectedPitCount, indexShard.searchStats().getTotal().getPitCount()); + } } diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java index 70efcf88cd581..e69b2cc523638 100644 --- a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -11,6 +11,8 @@ import org.junit.After; import org.junit.Before; import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.search.CreatePitAction; @@ -76,6 +78,7 @@ public void testDeletePit() throws Exception { execute = client().execute(CreatePitAction.INSTANCE, request); pitResponse = execute.get(); pitIds.add(pitResponse.getId()); + validatePitStats("index", 10, 0); DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = deleteExecute.get(); @@ -84,6 +87,7 @@ public void testDeletePit() throws Exception { assertTrue(pitIds.contains(deletePitInfo.getPitId())); assertTrue(deletePitInfo.isSuccessful()); } + validatePitStats("index", 0, 10); /** * Checking deleting the same PIT id again results in succeeded */ @@ -102,6 +106,7 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { CreatePitResponse pitResponse = execute.get(); List pitIds = new ArrayList<>(); 
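// The index-level assertions in this class reduce to reading the two new
// counters off the search stats; a rough sketch of that read path, mirroring
// the validatePitStats helper at the bottom of this class ("index" is a
// placeholder index name).
IndicesStatsResponse stats = client().admin().indices().prepareStats("index").all().get();
SearchStats.Stats total = stats.getIndex("index").getTotal().search.getTotal();
long pitCurrent = total.getPitCurrent(); // PIT contexts currently open
long pitCount = total.getPitCount();     // PIT contexts opened and since released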
pitIds.add(pitResponse.getId()); + validatePitStats("index", 5, 0); /** * Delete Pit #1 @@ -113,9 +118,11 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { assertTrue(pitIds.contains(deletePitInfo.getPitId())); assertTrue(deletePitInfo.isSuccessful()); } + validatePitStats("index", 0, 5); execute = client().execute(CreatePitAction.INSTANCE, request); pitResponse = execute.get(); pitIds.add(pitResponse.getId()); + validatePitStats("index", 5, 5); /** * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) */ @@ -126,6 +133,7 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { assertTrue(pitIds.contains(deletePitInfo.getPitId())); assertTrue(deletePitInfo.isSuccessful()); } + validatePitStats("index", 0, 10); } public void testDeletePitWithValidAndInvalidIds() throws Exception { @@ -148,6 +156,8 @@ public void testDeleteAllPits() throws Exception { client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); ensureGreen(); createPitOnIndex("index1"); + validatePitStats("index", 5, 0); + validatePitStats("index1", 5, 0); DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); /** @@ -160,6 +170,8 @@ public void testDeleteAllPits() throws Exception { assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); assertTrue(deletePitInfo.isSuccessful()); } + validatePitStats("index", 0, 5); + validatePitStats("index1", 0, 5); client().admin().indices().prepareDelete("index1").get(); } @@ -324,4 +336,16 @@ public void onFailure(Exception e) {} } } + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount) throws ExecutionException, + InterruptedException { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + indicesStatsRequest.indices(index); + indicesStatsRequest.all(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); + long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent(); + long pitCount = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCount(); + assertEquals(expectedPitCurrent, pitCurrent); + assertEquals(expectedPitCount, pitCount); + } + } diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java index f93a43a027da7..29126d786770e 100644 --- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -36,6 +36,8 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import java.util.ArrayList; import java.util.HashSet; @@ -81,6 +83,7 @@ public void testPit() throws Exception { .get(); assertEquals(2, searchResponse.getSuccessfulShards()); assertEquals(2, searchResponse.getTotalShards()); + validatePitStats("index", 2, 2); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); } @@ -94,6 +97,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { ExecutionException ex = expectThrows(ExecutionException.class, execute::get); assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); 
assertTrue(ex.getMessage().contains("Partial shards failure")); + validatePitStats("index", 0, 0); return super.onNodeStopped(nodeName); } }); @@ -116,6 +120,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { .get(); assertEquals(1, searchResponse.getSuccessfulShards()); assertEquals(1, searchResponse.getTotalShards()); + validatePitStats("index", 1, 1); return super.onNodeStopped(nodeName); } }); @@ -137,6 +142,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertEquals(1, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getSkippedShards()); assertEquals(2, searchResponse.getTotalShards()); + validatePitStats("index", 1, 1); PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); return super.onNodeStopped(nodeName); } @@ -327,6 +333,18 @@ public void onFailure(Exception e) {} } } + public void validatePitStats(String index, long expectedPitCurrent, long expectedOpenContexts) throws ExecutionException, + InterruptedException { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + indicesStatsRequest.indices("index"); + indicesStatsRequest.all(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); + long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent(); + long openContexts = indicesStatsResponse.getIndex(index).getTotal().search.getOpenContexts(); + assertEquals(expectedPitCurrent, pitCurrent); + assertEquals(expectedOpenContexts, openContexts); + } + public void testGetAllPits() throws Exception { client().admin().indices().prepareCreate("index1").get(); CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 9c140cda6ccc8..1f824d40eb638 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1429,7 +1429,7 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard ); } - public void testDeletePitReaderContext() { + public void testDeletePitReaderContext() throws ExecutionException, InterruptedException { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); @@ -1444,6 +1444,7 @@ public void testDeletePitReaderContext() { assertThat(searchService.getActiveContexts(), equalTo(1)); assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + validatePitStats("index", 1, 0, 0); DeletePitResponse deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); // assert true for reader context not found @@ -1451,6 +1452,7 @@ public void testDeletePitReaderContext() { assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); // adding this assert to showcase behavior difference assertFalse(searchService.freeReaderContext(future.actionGet())); + validatePitStats("index", 0, 1, 0); } public void testPitContextMaxKeepAlive() { @@ -1476,7 +1478,7 @@ public void testPitContextMaxKeepAlive() { assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); } - public void testUpdatePitId() { + public void testUpdatePitId() throws ExecutionException, 
InterruptedException { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); @@ -1497,7 +1499,9 @@ public void testUpdatePitId() { assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); assertThat(searchService.getActiveContexts(), equalTo(1)); assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + validatePitStats("index", 1, 0, 0); assertTrue(searchService.freeReaderContext(future.actionGet())); + validatePitStats("index", 0, 1, 0); } public void testUpdatePitIdMaxKeepAlive() { @@ -1552,4 +1556,13 @@ public void testUpdatePitIdWithInvalidReaderId() { assertThat(searchService.getActiveContexts(), equalTo(0)); assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException, + InterruptedException { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex(index)); + IndexShard indexShard = indexService.getShard(shardId); + assertEquals(expectedPitCurrent, indexShard.searchStats().getTotal().getPitCurrent()); + assertEquals(expectedPitCount, indexShard.searchStats().getTotal().getPitCount()); + } } From 65e28dcfe10a893fd1d66c8363a7b04407a0f8f1 Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Mon, 8 Aug 2022 15:35:37 -0700 Subject: [PATCH 072/104] Updating release workflow (#4164) --- .github/workflows/auto-release.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 73f3b6c2487d3..b8d3912c5864a 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -3,7 +3,7 @@ name: Releases on: push: tags: - - '*.*.*' + - '*' jobs: @@ -12,11 +12,18 @@ jobs: permissions: contents: write steps: + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v1.5.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 - name: Get tag id: tag uses: dawidd6/action-get-tag@v1 - uses: actions/checkout@v2 - uses: ncipollo/release-action@v1 with: - token: ${{ secrets.GITHUB_TOKEN }} + github_token: ${{ steps.github_app_token.outputs.token }} bodyFile: release-notes/opensearch.release-notes-${{steps.tag.outputs.tag}}.md From 0f2ed704577d6a8a2fa95ee7c110dd7a2b91a141 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 8 Aug 2022 23:28:07 -0400 Subject: [PATCH 073/104] [Bug]: gradle check failing with java heap OutOfMemoryError (#4150) Signed-off-by: Andriy Redko --- gradle.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradle.properties b/gradle.properties index 86af9ad62b1a4..73df0940ce181 100644 --- a/gradle.properties +++ b/gradle.properties @@ -19,7 +19,7 @@ org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m \ --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED \ --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED \ --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED -options.forkOptions.memoryMaximumSize=2g +options.forkOptions.memoryMaximumSize=3g # Disable duplicate project id detection # See https://docs.gradle.org/current/userguide/upgrading_version_6.html#duplicate_project_names_may_cause_publication_to_fail From 
a469a3cbcb7a7294a81bf7e4122968c2a9b32ca4 Mon Sep 17 00:00:00 2001 From: Bukhtawar Khan Date: Tue, 9 Aug 2022 11:49:39 +0530 Subject: [PATCH 074/104] Introduce Remote translog feature flag (#4158) * Introduce Remote translog feature flag * Limit combinations of remote segment and translog store to valid ones Signed-off-by: Bukhtawar Khan --- .../cluster/metadata/IndexMetadata.java | 38 +++++++++++ .../common/settings/IndexScopedSettings.java | 7 +- .../common/settings/SettingsModule.java | 4 +- .../org/opensearch/index/IndexSettings.java | 10 ++- .../opensearch/index/IndexSettingsTests.java | 68 ++++++++++++++++++- 5 files changed, 118 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index c534eeccd4e2b..6e39809ac1a34 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -284,6 +284,8 @@ public Iterator> settings() { ); public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled"; + + public static final String SETTING_REMOTE_TRANSLOG_STORE_ENABLED = "index.remote_store.translog.enabled"; /** * Used to specify if the index data should be persisted in the remote store. */ @@ -294,6 +296,42 @@ public Iterator> settings() { Property.Final ); + /** + * Used to specify if the index translog operations should be persisted in the remote store. + */ + public static final Setting INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING = Setting.boolSetting( + SETTING_REMOTE_TRANSLOG_STORE_ENABLED, + false, + new Setting.Validator<>() { + + @Override + public void validate(final Boolean value) {} + + @Override + public void validate(final Boolean value, final Map, Object> settings) { + final Boolean isRemoteSegmentStoreEnabled = (Boolean) settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (isRemoteSegmentStoreEnabled == false && value == true) { + throw new IllegalArgumentException( + "Settings " + + INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey() + + " cannot be enabled when " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + " is set to " + + settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING) + ); + } + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, + Property.IndexScope, + Property.Final + ); + public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index b0efaab733231..a3fa2c7ee3112 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -61,6 +61,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; @@ -218,11 +219,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * is ready for production release, the feature flag can be removed, and the * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. 
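 * A single feature flag may guard several related settings, which is why each flag maps to a list
 * of settings rather than to a single setting.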
*/ - public static final Map FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( + public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - IndexMetadata.INDEX_REPLICATION_TYPE_SETTING, + Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING + Arrays.asList(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING) ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index 16b39bb2e33f9..7b4dfb7d64bb6 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -88,9 +88,9 @@ public SettingsModule( registerSetting(setting); } - for (Map.Entry featureFlaggedSetting : IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) { + for (Map.Entry> featureFlaggedSetting : IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS.entrySet()) { if (FeatureFlags.isEnabled(featureFlaggedSetting.getKey())) { - registerSetting(featureFlaggedSetting.getValue()); + featureFlaggedSetting.getValue().forEach(feature -> registerSetting(feature)); } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index d1363540709c8..657cb1ee55cb9 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -559,6 +559,7 @@ public final class IndexSettings { private final int numberOfShards; private final ReplicationType replicationType; private final boolean isRemoteStoreEnabled; + private final boolean isRemoteTranslogStoreEnabled; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -719,7 +720,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); - + isRemoteTranslogStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, false); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); @@ -971,6 +972,13 @@ public boolean isRemoteStoreEnabled() { return isRemoteStoreEnabled; } + /** + * Returns if remote translog store is enabled for this index. + */ + public boolean isRemoteTranslogStoreEnabled() { + return isRemoteTranslogStoreEnabled; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. 
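To make the permitted combinations concrete, here is a minimal sketch of how the new validator
behaves, assuming the remote store feature flag is enabled (variable names and assertions are
illustrative):

    // Allowed: the remote translog store requires the remote segment store.
    Settings valid = Settings.builder()
        .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
        .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true)
        .build();
    assertTrue(IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(valid));

    // Rejected: a remote translog without the remote segment store fails validation with
    // "Settings index.remote_store.translog.enabled cannot be enabled when
    // index.remote_store.enabled is set to false".
    Settings invalid = Settings.builder()
        .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false)
        .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true)
        .build();
    expectThrows(IllegalArgumentException.class, () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(invalid));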
diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index b15b959abe6c5..90aa36253fb7f 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -41,7 +41,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.translog.Translog; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -57,7 +56,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; -import static org.opensearch.common.settings.IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS; public class IndexSettingsTests extends OpenSearchTestCase { @@ -777,9 +775,42 @@ public void testRemoteStoreExplicitSetting() { assertTrue(settings.isRemoteStoreEnabled()); } + public void testRemoteTranslogStoreDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertFalse(settings.isRemoteTranslogStoreEnabled()); + } + + public void testRemoteTranslogStoreExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertTrue(settings.isRemoteTranslogStoreEnabled()); + } + + public void testRemoteTranslogStoreNullSetting() { + Settings indexSettings = Settings.builder() + .put("index.remote_store.translog.enabled", "null") + .put("index.remote_store.enabled", randomBoolean()) + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals("Failed to parse value [null] as only [true] or [false] are allowed.", iae.getMessage()); + } + public void testUpdateRemoteStoreFails() { Set> remoteStoreSettingSet = new HashSet<>(); - remoteStoreSettingSet.add(FEATURE_FLAGGED_INDEX_SETTINGS.get(FeatureFlags.REMOTE_STORE)); + remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING); IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); IllegalArgumentException error = expectThrows( IllegalArgumentException.class, @@ -792,4 +823,35 @@ public void testUpdateRemoteStoreFails() { ); assertEquals(error.getMessage(), "final index setting [index.remote_store.enabled], not updateable"); } + + public void testUpdateRemoteTranslogStoreFails() { + Set> remoteStoreSettingSet = new HashSet<>(); + remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING); + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> settings.updateSettings( + Settings.builder().put("index.remote_store.translog.enabled", randomBoolean()).build(), + Settings.builder(), + Settings.builder(), + "index" + ) + ); + 
assertEquals(error.getMessage(), "final index setting [index.remote_store.translog.enabled], not updateable"); + } + + public void testEnablingRemoteTranslogStoreFailsWhenRemoteSegmentDisabled() { + Settings indexSettings = Settings.builder() + .put("index.remote_store.translog.enabled", true) + .put("index.remote_store.enabled", false) + .build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals( + "Settings index.remote_store.translog.enabled cannot be enabled when index.remote_store.enabled is set to false", + iae.getMessage() + ); + } } From a120dd283cd174327494ef0cc63f377dd029ab7f Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 9 Aug 2022 09:58:52 -0700 Subject: [PATCH 075/104] Add IndexShard#getLatestReplicationCheckpoint behind segrep enable feature flag (#4163) * Add IndexShard#getLatestReplicationCheckpoint behind segrep enable feature flag Signed-off-by: Suraj Singh * Address review comment. Move tests to SegmentReplicationIndexShardTests Signed-off-by: Suraj Singh * Add segrep enbaled index settings in TargetServiceTests, SourceHandlerTests Signed-off-by: Suraj Singh --- .../opensearch/index/shard/IndexShard.java | 7 ++++-- .../SegmentReplicationIndexShardTests.java | 24 +++++++++++++++++++ .../SegmentReplicationSourceHandlerTests.java | 5 +++- .../SegmentReplicationTargetServiceTests.java | 6 ++++- 4 files changed, 38 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index b05eec4304cd5..53324c94c4b08 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1396,10 +1396,13 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti } /** - * Returns the lastest Replication Checkpoint that shard received. Shards will return an EMPTY checkpoint before - * the engine is opened. + * Returns the latest ReplicationCheckpoint that shard received. 
+ * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices */ public ReplicationCheckpoint getLatestReplicationCheckpoint() { + if (indexSettings.isSegRepEnabled() == false) { + return null; + } if (getEngineOrNull() == null) { return ReplicationCheckpoint.empty(shardId); } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 3fcf6116b11a2..4f2784db93df2 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -14,14 +14,38 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; +import java.io.IOException; + public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { private static final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); + /** + * Test that latestReplicationCheckpoint returns null only for docrep enabled indices + */ + public void testReplicationCheckpointNullForDocRep() throws IOException { + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "DOCUMENT").put(Settings.EMPTY).build(); + final IndexShard indexShard = newStartedShard(false, indexSettings); + assertNull(indexShard.getLatestReplicationCheckpoint()); + closeShards(indexShard); + } + + /** + * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices + */ + public void testReplicationCheckpointNotNullForSegReb() throws IOException { + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT").put(Settings.EMPTY).build(); + final IndexShard indexShard = newStartedShard(indexSettings); + final ReplicationCheckpoint replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); + assertNotNull(replicationCheckpoint); + closeShards(indexShard); + } + public void testIgnoreShardIdle() throws Exception { try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { shards.startAll(); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 70061c54d0da2..535cd5974490f 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -15,7 +15,9 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; @@ -41,7 +43,8 @@ public class SegmentReplicationSourceHandlerTests extends IndexShardTestCase { @Override 
public void setUp() throws Exception { super.setUp(); - primary = newStartedShard(true); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT").put(Settings.EMPTY).build(); + primary = newStartedShard(true, settings); replica = newShard(primary.shardId(), false); recoverReplica(replica, primary, true); replicaDiscoveryNode = replica.recoveryState().getTargetNode(); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 8b4bda7de50ad..b7cfda5dc1d64 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -13,6 +13,7 @@ import org.mockito.Mockito; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.IndexShard; @@ -50,7 +51,10 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { @Override public void setUp() throws Exception { super.setUp(); - final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) + .build(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); final TransportService transportService = mock(TransportService.class); From 4751f3bd296e6952b852a457a82220ec248b037a Mon Sep 17 00:00:00 2001 From: Navneet Verma Date: Tue, 9 Aug 2022 12:56:14 -0700 Subject: [PATCH 076/104] Refactor the src and test of GeoBoundsAggregation on GeoPoint from server folder to geo module. Refactors the GeoBoundsAggregation for geo_point types from the core server to the geo module. 
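For callers the aggregation itself is unchanged; only the package moves. A minimal usage sketch
against the relocated classes, assuming an index with a geo_point field (index and field names
are illustrative):

    import org.opensearch.geo.search.aggregations.metrics.GeoBounds;
    import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder;

    GeoBoundsAggregationBuilder aggregation = new GeoBoundsAggregationBuilder("bounds")
        .field("location")      // a geo_point field
        .wrapLongitude(true);   // allow bounding boxes that cross the dateline
    SearchResponse response = client().prepareSearch("idx").addAggregation(aggregation).get();
    GeoBounds bounds = response.getAggregations().get("bounds");
    GeoPoint topLeft = bounds.topLeft();
    GeoPoint bottomRight = bounds.bottomRight();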
Signed-off-by: Navneet Verma --- .../client/RestHighLevelClient.java | 3 - modules/geo/build.gradle | 4 +- .../geo/GeoModulePluginIntegTestCase.java | 47 +++ .../opensearch/geo/search/MissingValueIT.java | 59 ++++ ...ractGeoAggregatorTestCaseModulePlugin.java | 295 ++++++++++++++++++ .../aggregations/metrics/GeoBoundsIT.java | 23 +- .../org/opensearch/geo/GeoModulePlugin.java | 20 +- .../metrics/AbstractGeoBoundsAggregator.java | 128 ++++++++ .../aggregations/metrics/GeoBounds.java | 2 +- .../metrics/GeoBoundsAggregationBuilder.java | 16 +- .../metrics/GeoBoundsAggregator.java | 92 +----- .../metrics/GeoBoundsAggregatorFactory.java | 2 +- .../metrics/InternalGeoBounds.java | 2 +- .../aggregations/metrics/ParsedGeoBounds.java | 2 +- .../GeoBoundsAggregationBuilderTests.java | 43 +++ .../metrics/GeoBoundsAggregatorTests.java | 26 +- .../metrics/InternalGeoBoundsTests.java | 33 +- .../geo/tests/common/AggregationBuilders.java | 21 ++ .../common/AggregationInspectionHelper.java | 18 ++ .../geo/tests/common/RandomGeoGenerator.java | 86 +++++ .../search/aggregations/MissingValueIT.java | 24 -- .../org/opensearch/search/SearchModule.java | 8 - .../aggregations/AggregationBuilders.java | 9 - .../support/AggregationInspectionHelper.java | 5 - .../aggregations/AggregationsTests.java | 2 - .../aggregations/metrics/GeoBoundsTests.java | 53 ---- .../test/InternalAggregationTestCase.java | 3 - .../test/OpenSearchIntegTestCase.java | 1 + 28 files changed, 799 insertions(+), 228 deletions(-) create mode 100644 modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java create mode 100644 modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java create mode 100644 modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java rename {server/src/internalClusterTest/java/org/opensearch => modules/geo/src/internalClusterTest/java/org/opensearch/geo}/search/aggregations/metrics/GeoBoundsIT.java (97%) create mode 100644 modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/GeoBounds.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/GeoBoundsAggregationBuilder.java (93%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/GeoBoundsAggregator.java (51%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/GeoBoundsAggregatorFactory.java (98%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/InternalGeoBounds.java (99%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/ParsedGeoBounds.java (98%) create mode 100644 modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/metrics/GeoBoundsAggregatorTests.java (88%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/metrics/InternalGeoBoundsTests.java (81%) create mode 100644 
modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java create mode 100644 modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java create mode 100644 modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java delete mode 100644 server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index d293b979debb5..7ae8f8826c5a4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -157,7 +157,6 @@ import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -169,7 +168,6 @@ import org.opensearch.search.aggregations.metrics.ParsedAvg; import org.opensearch.search.aggregations.metrics.ParsedCardinality; import org.opensearch.search.aggregations.metrics.ParsedExtendedStats; -import org.opensearch.search.aggregations.metrics.ParsedGeoBounds; import org.opensearch.search.aggregations.metrics.ParsedGeoCentroid; import org.opensearch.search.aggregations.metrics.ParsedHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.ParsedHDRPercentiles; @@ -2116,7 +2114,6 @@ static List getDefaultNamedXContents() { map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c)); map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c)); - map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c)); map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index 1146b6924b2f8..0b8e623c24ac6 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -28,9 +28,10 @@ * under the License. */ apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Placeholder plugin for geospatial features in OpenSearch. only registers geo_shape field mapper for now' + description 'Plugin for geospatial features in OpenSearch. 
Registering the geo_shape and aggregations GeoBounds on Geo_Shape and Geo_Point' classname 'org.opensearch.geo.GeoModulePlugin' } @@ -42,4 +43,3 @@ restResources { artifacts { restTests(project.file('src/yamlRestTest/resources/rest-api-spec/test')) } -test.enabled = false diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java new file mode 100644 index 0000000000000..7dc6f2c1b89b7 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo; + +import org.opensearch.index.mapper.GeoShapeFieldMapper; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.TestGeoShapeFieldMapperPlugin; + +import java.util.Collection; +import java.util.Collections; + +/** + * This is the base class for all the Geo related integration tests. Use this class to add the features and settings + * for the test cluster on which integration tests are running. + */ +public abstract class GeoModulePluginIntegTestCase extends OpenSearchIntegTestCase { + /** + * Returns a collection of plugins that should be loaded on each node for doing the integration tests. As this + * geo plugin is not getting packaged in a zip, we need to load it before the tests run. + * + * @return List of {@link Plugin} + */ + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } + + /** + * This was added as a backdoor to Mock the implementation of {@link GeoShapeFieldMapper} which was coming from + * {@link GeoModulePlugin}. Mock implementation is {@link TestGeoShapeFieldMapperPlugin}. Now we are using the + * {@link GeoModulePlugin} in our integration tests we need to override this functionality to avoid multiple mapper + * error. + * + * @return boolean + */ + @Override + protected boolean addMockGeoShapeFieldMapper() { + return false; + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java new file mode 100644 index 0000000000000..2ac73728b2dab --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.geo.search; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.search.aggregations.metrics.GeoBounds; +import org.opensearch.geo.tests.common.AggregationBuilders; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class MissingValueIT extends GeoModulePluginIntegTestCase { + + @Override + protected void setupSuiteScopeCluster() throws Exception { + assertAcked(prepareCreate("idx").setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get()); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource(), + client().prepareIndex("idx") + .setId("2") + .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2") + ); + } + + public void testUnmappedGeoBounds() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(AggregationBuilders.geoBounds("bounds").field("non_existing_field").missing("2,1")) + .get(); + assertSearchResponse(response); + GeoBounds bounds = response.getAggregations().get("bounds"); + assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5)); + assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5)); + assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); + assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); + } + + public void testGeoBounds() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(AggregationBuilders.geoBounds("bounds").field("location").missing("2,1")) + .get(); + assertSearchResponse(response); + GeoBounds bounds = response.getAggregations().get("bounds"); + assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5)); + assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5)); + assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); + assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java new file mode 100644 index 0000000000000..0065cca7d6101 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java @@ -0,0 +1,295 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.geo.search.aggregations.metrics; + +import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.ObjectIntMap; +import com.carrotsearch.hppc.ObjectObjectHashMap; +import com.carrotsearch.hppc.ObjectObjectMap; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.Strings; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.geometry.utils.Geohash; +import org.opensearch.search.SearchHit; +import org.opensearch.search.sort.SortBuilders; +import org.opensearch.search.sort.SortOrder; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +/** + * This is base class for all Geo Aggregations Integration Tests. This class is similar to what we have in the server + * folder of the OpenSearch repo. As part of moving the Geo based aggregation into separate module and plugin we need + * to copy the code as we cannot depend on this class. + * GitHub issue + */ +public abstract class AbstractGeoAggregatorTestCaseModulePlugin extends GeoModulePluginIntegTestCase { + + protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; + protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; + protected static final String NUMBER_FIELD_NAME = "l_values"; + protected static final String UNMAPPED_IDX_NAME = "idx_unmapped"; + protected static final String IDX_NAME = "idx"; + protected static final String EMPTY_IDX_NAME = "empty_idx"; + protected static final String DATELINE_IDX_NAME = "dateline_idx"; + protected static final String HIGH_CARD_IDX_NAME = "high_card_idx"; + protected static final String IDX_ZERO_NAME = "idx_zero"; + + protected static int numDocs; + protected static int numUniqueGeoPoints; + protected static GeoPoint[] singleValues, multiValues; + protected static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight, singleCentroid, multiCentroid, + unmappedCentroid; + protected static ObjectIntMap expectedDocCountsForGeoHash = null; + protected static ObjectObjectMap expectedCentroidsForGeoHash = null; + protected static final double GEOHASH_TOLERANCE = 1E-5D; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex(UNMAPPED_IDX_NAME); + assertAcked( + prepareCreate(IDX_NAME).setMapping( + SINGLE_VALUED_FIELD_NAME, + "type=geo_point", + MULTI_VALUED_FIELD_NAME, + "type=geo_point", + NUMBER_FIELD_NAME, + "type=long", + "tag", + "type=keyword" + ) + ); + + singleTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY); + singleBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY); + multiTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY); + multiBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY); + singleCentroid = new GeoPoint(0, 
0); + multiCentroid = new GeoPoint(0, 0); + unmappedCentroid = new GeoPoint(0, 0); + + numDocs = randomIntBetween(6, 20); + numUniqueGeoPoints = randomIntBetween(1, numDocs); + expectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2); + expectedCentroidsForGeoHash = new ObjectObjectHashMap<>(numDocs * 2); + + singleValues = new GeoPoint[numUniqueGeoPoints]; + for (int i = 0; i < singleValues.length; i++) { + singleValues[i] = RandomGeoGenerator.randomPoint(random()); + updateBoundsTopLeft(singleValues[i], singleTopLeft); + updateBoundsBottomRight(singleValues[i], singleBottomRight); + } + + multiValues = new GeoPoint[numUniqueGeoPoints]; + for (int i = 0; i < multiValues.length; i++) { + multiValues[i] = RandomGeoGenerator.randomPoint(random()); + updateBoundsTopLeft(multiValues[i], multiTopLeft); + updateBoundsBottomRight(multiValues[i], multiBottomRight); + } + + List builders = new ArrayList<>(); + + GeoPoint singleVal; + final GeoPoint[] multiVal = new GeoPoint[2]; + double newMVLat, newMVLon; + for (int i = 0; i < numDocs; i++) { + singleVal = singleValues[i % numUniqueGeoPoints]; + multiVal[0] = multiValues[i % numUniqueGeoPoints]; + multiVal[1] = multiValues[(i + 1) % numUniqueGeoPoints]; + builders.add( + client().prepareIndex(IDX_NAME) + .setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, singleVal.lon(), singleVal.lat()) + .startArray(MULTI_VALUED_FIELD_NAME) + .startArray() + .value(multiVal[0].lon()) + .value(multiVal[0].lat()) + .endArray() + .startArray() + .value(multiVal[1].lon()) + .value(multiVal[1].lat()) + .endArray() + .endArray() + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) + ); + singleCentroid = singleCentroid.reset( + singleCentroid.lat() + (singleVal.lat() - singleCentroid.lat()) / (i + 1), + singleCentroid.lon() + (singleVal.lon() - singleCentroid.lon()) / (i + 1) + ); + newMVLat = (multiVal[0].lat() + multiVal[1].lat()) / 2d; + newMVLon = (multiVal[0].lon() + multiVal[1].lon()) / 2d; + multiCentroid = multiCentroid.reset( + multiCentroid.lat() + (newMVLat - multiCentroid.lat()) / (i + 1), + multiCentroid.lon() + (newMVLon - multiCentroid.lon()) / (i + 1) + ); + } + + assertAcked(prepareCreate(EMPTY_IDX_NAME).setMapping(SINGLE_VALUED_FIELD_NAME, "type=geo_point")); + + assertAcked( + prepareCreate(DATELINE_IDX_NAME).setMapping( + SINGLE_VALUED_FIELD_NAME, + "type=geo_point", + MULTI_VALUED_FIELD_NAME, + "type=geo_point", + NUMBER_FIELD_NAME, + "type=long", + "tag", + "type=keyword" + ) + ); + + GeoPoint[] geoValues = new GeoPoint[5]; + geoValues[0] = new GeoPoint(38, 178); + geoValues[1] = new GeoPoint(12, -179); + geoValues[2] = new GeoPoint(-24, 170); + geoValues[3] = new GeoPoint(32, -175); + geoValues[4] = new GeoPoint(-11, 178); + + for (int i = 0; i < 5; i++) { + builders.add( + client().prepareIndex(DATELINE_IDX_NAME) + .setSource( + jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, geoValues[i].lon(), geoValues[i].lat()) + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) + ); + } + assertAcked( + prepareCreate(HIGH_CARD_IDX_NAME).setSettings(Settings.builder().put("number_of_shards", 2)) + .setMapping( + SINGLE_VALUED_FIELD_NAME, + "type=geo_point", + MULTI_VALUED_FIELD_NAME, + "type=geo_point", + NUMBER_FIELD_NAME, + "type=long,store=true", + "tag", + "type=keyword" + ) + ); + + for (int i = 0; i < 2000; i++) { + singleVal = singleValues[i % numUniqueGeoPoints]; + builders.add( + client().prepareIndex(HIGH_CARD_IDX_NAME) + .setSource( + 
jsonBuilder().startObject() + .array(SINGLE_VALUED_FIELD_NAME, singleVal.lon(), singleVal.lat()) + .startArray(MULTI_VALUED_FIELD_NAME) + .startArray() + .value(multiValues[i % numUniqueGeoPoints].lon()) + .value(multiValues[i % numUniqueGeoPoints].lat()) + .endArray() + .startArray() + .value(multiValues[(i + 1) % numUniqueGeoPoints].lon()) + .value(multiValues[(i + 1) % numUniqueGeoPoints].lat()) + .endArray() + .endArray() + .field(NUMBER_FIELD_NAME, i) + .field("tag", "tag" + i) + .endObject() + ) + ); + updateGeohashBucketsCentroid(singleVal); + } + + builders.add( + client().prepareIndex(IDX_ZERO_NAME) + .setSource(jsonBuilder().startObject().array(SINGLE_VALUED_FIELD_NAME, 0.0, 1.0).endObject()) + ); + assertAcked(prepareCreate(IDX_ZERO_NAME).setMapping(SINGLE_VALUED_FIELD_NAME, "type=geo_point")); + + indexRandom(true, builders); + ensureSearchable(); + + // Added to debug a test failure where the terms aggregation seems to be reporting two documents with the same + // value for NUMBER_FIELD_NAME. This will check that after random indexing each document only has 1 value for + // NUMBER_FIELD_NAME and it is the correct value. Following this initial change its seems that this call was getting + // more that 2000 hits (actual value was 2059) so now it will also check to ensure all hits have the correct index and type. + SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) + .addStoredField(NUMBER_FIELD_NAME) + .addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME).order(SortOrder.ASC)) + .setSize(5000) + .get(); + assertSearchResponse(response); + long totalHits = response.getHits().getTotalHits().value; + XContentBuilder builder = XContentFactory.jsonBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + logger.info("Full high_card_idx Response Content:\n{ {} }", Strings.toString(builder)); + for (int i = 0; i < totalHits; i++) { + SearchHit searchHit = response.getHits().getAt(i); + assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx")); + DocumentField hitField = searchHit.field(NUMBER_FIELD_NAME); + + assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1)); + Long value = hitField.getValue(); + assertThat("Hit " + i + " has wrong value", value.intValue(), equalTo(i)); + } + assertThat(totalHits, equalTo(2000L)); + } + + private void updateGeohashBucketsCentroid(final GeoPoint location) { + String hash = Geohash.stringEncode(location.lon(), location.lat(), Geohash.PRECISION); + for (int precision = Geohash.PRECISION; precision > 0; --precision) { + final String h = hash.substring(0, precision); + expectedDocCountsForGeoHash.put(h, expectedDocCountsForGeoHash.getOrDefault(h, 0) + 1); + expectedCentroidsForGeoHash.put(h, updateHashCentroid(h, location)); + } + } + + private GeoPoint updateHashCentroid(String hash, final GeoPoint location) { + GeoPoint centroid = expectedCentroidsForGeoHash.getOrDefault(hash, null); + if (centroid == null) { + return new GeoPoint(location.lat(), location.lon()); + } + final int docCount = expectedDocCountsForGeoHash.get(hash); + final double newLon = centroid.lon() + (location.lon() - centroid.lon()) / docCount; + final double newLat = centroid.lat() + (location.lat() - centroid.lat()) / docCount; + return centroid.reset(newLat, newLon); + } + + private void updateBoundsBottomRight(GeoPoint geoPoint, GeoPoint currentBound) { + if (geoPoint.lat() < currentBound.lat()) { + currentBound.resetLat(geoPoint.lat()); + } + if (geoPoint.lon() > 
currentBound.lon()) { + currentBound.resetLon(geoPoint.lon()); + } + } + + private void updateBoundsTopLeft(GeoPoint geoPoint, GeoPoint currentBound) { + if (geoPoint.lat() > currentBound.lat()) { + currentBound.resetLat(geoPoint.lat()); + } + if (geoPoint.lon() < currentBound.lon()) { + currentBound.resetLon(geoPoint.lon()); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoBoundsIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java similarity index 97% rename from server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoBoundsIT.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java index 3af3b9e5212f8..5cbd98a4936e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoBoundsIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; @@ -43,21 +43,21 @@ import java.util.List; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.search.aggregations.AggregationBuilders.geoBounds; -import static org.opensearch.search.aggregations.AggregationBuilders.global; -import static org.opensearch.search.aggregations.AggregationBuilders.terms; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.closeTo; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.aggregations.AggregationBuilders.global; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.opensearch.geo.tests.common.AggregationBuilders.geoBounds; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoBoundsIT extends AbstractGeoTestCase { +public class GeoBoundsIT extends AbstractGeoAggregatorTestCaseModulePlugin { private static final String aggName = "geoBounds"; public void testSingleValuedField() throws Exception { @@ -226,7 +226,8 @@ public void testSingleValuedFieldNearDateLineWrapLongitude() throws Exception { } /** - * This test forces the {@link GeoBoundsAggregator} to resize the {@link BigArray}s it uses to ensure they are resized correctly + * This test forces the {@link GeoBoundsAggregator} to resize the {@link BigArray}s it uses to ensure they are + * resized correctly */ public void testSingleValuedFieldAsSubAggToHighCardTermsAgg() { SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java 
b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index ed8c7c06fb3e8..64aac66b7eef3 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -32,18 +32,36 @@ package org.opensearch.geo; +import org.opensearch.geo.search.aggregations.metrics.GeoBounds; +import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; +import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SearchPlugin; import java.util.Collections; +import java.util.List; import java.util.Map; -public class GeoModulePlugin extends Plugin implements MapperPlugin { +public class GeoModulePlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map getMappers() { return Collections.singletonMap(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser()); } + + /** + * Registering {@link GeoBounds} aggregation on GeoPoint field. + */ + @Override + public List getAggregations() { + final AggregationSpec spec = new AggregationSpec( + GeoBoundsAggregationBuilder.NAME, + GeoBoundsAggregationBuilder::new, + GeoBoundsAggregationBuilder.PARSER + ).addResultReader(InternalGeoBounds::new).setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators); + return Collections.singletonList(spec); + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java new file mode 100644 index 0000000000000..4a39fa1da04eb --- /dev/null +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.metrics; + +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.DoubleArray; +import org.opensearch.search.aggregations.Aggregator; +import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.metrics.MetricsAggregator; +import org.opensearch.search.aggregations.support.ValuesSource; +import org.opensearch.search.aggregations.support.ValuesSourceConfig; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Map; + +/** + * Abstract class for doing the {@link GeoBounds} Aggregation over fields of type geo_shape and geo_point. + * + * @param Class extending the {@link ValuesSource} which will provide the data on which aggregation will happen. 
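+ * Subclasses record per-bucket bounds into the top/bottom and positive/negative left/right arrays;
+ * this base class allocates, grows, and releases those arrays and assembles the {@link InternalGeoBounds} result.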
+ * @opensearch.internal
+ */
+public abstract class AbstractGeoBoundsAggregator<T extends ValuesSource> extends MetricsAggregator {
+
+    protected final T valuesSource;
+    protected final boolean wrapLongitude;
+    protected DoubleArray tops;
+    protected DoubleArray bottoms;
+    protected DoubleArray posLefts;
+    protected DoubleArray posRights;
+    protected DoubleArray negLefts;
+    protected DoubleArray negRights;
+
+    @SuppressWarnings("unchecked")
+    protected AbstractGeoBoundsAggregator(
+        String name,
+        SearchContext searchContext,
+        Aggregator aggregator,
+        ValuesSourceConfig valuesSourceConfig,
+        boolean wrapLongitude,
+        Map<String, Object> metaData
+    ) throws IOException {
+        super(name, searchContext, aggregator, metaData);
+        this.wrapLongitude = wrapLongitude;
+        valuesSource = valuesSourceConfig.hasValues() ? (T) valuesSourceConfig.getValuesSource() : null;
+
+        if (valuesSource != null) {
+            final BigArrays bigArrays = context.bigArrays();
+            tops = bigArrays.newDoubleArray(1, false);
+            tops.fill(0, tops.size(), Double.NEGATIVE_INFINITY);
+            bottoms = bigArrays.newDoubleArray(1, false);
+            bottoms.fill(0, bottoms.size(), Double.POSITIVE_INFINITY);
+            posLefts = bigArrays.newDoubleArray(1, false);
+            posLefts.fill(0, posLefts.size(), Double.POSITIVE_INFINITY);
+            posRights = bigArrays.newDoubleArray(1, false);
+            posRights.fill(0, posRights.size(), Double.NEGATIVE_INFINITY);
+            negLefts = bigArrays.newDoubleArray(1, false);
+            negLefts.fill(0, negLefts.size(), Double.POSITIVE_INFINITY);
+            negRights = bigArrays.newDoubleArray(1, false);
+            negRights.fill(0, negRights.size(), Double.NEGATIVE_INFINITY);
+        }
+    }
+
+    /**
+     * Build an empty aggregation.
+     */
+    @Override
+    public InternalAggregation buildEmptyAggregation() {
+        return new InternalGeoBounds(
+            name,
+            Double.NEGATIVE_INFINITY,
+            Double.POSITIVE_INFINITY,
+            Double.POSITIVE_INFINITY,
+            Double.NEGATIVE_INFINITY,
+            Double.POSITIVE_INFINITY,
+            Double.NEGATIVE_INFINITY,
+            wrapLongitude,
+            metadata()
+        );
+    }
+
+    /**
+     * Build an aggregation for data that has been collected into owningBucketOrd.
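+     * Only the six running extents are packaged here; the final left/right edges
+     * (including the date-line wrap when wrapLongitude is set) are resolved at reduce
+     * time by {@link InternalGeoBounds}, conceptually by comparing
+     * unwrappedWidth = posRight - negLeft against
+     * wrappedWidth = (180 - posLeft) + (negRight + 180) and keeping the narrower box.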
+ */ + @Override + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + if (valuesSource == null) { + return buildEmptyAggregation(); + } + double top = tops.get(owningBucketOrdinal); + double bottom = bottoms.get(owningBucketOrdinal); + double posLeft = posLefts.get(owningBucketOrdinal); + double posRight = posRights.get(owningBucketOrdinal); + double negLeft = negLefts.get(owningBucketOrdinal); + double negRight = negRights.get(owningBucketOrdinal); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, metadata()); + } + + @Override + public void doClose() { + Releasables.close(tops, bottoms, posLefts, posRights, negLefts, negRights); + } + + protected void setBucketSize(final long bucket, final BigArrays bigArrays) { + if (bucket >= tops.size()) { + long from = tops.size(); + tops = bigArrays.grow(tops, bucket + 1); + tops.fill(from, tops.size(), Double.NEGATIVE_INFINITY); + bottoms = bigArrays.resize(bottoms, tops.size()); + bottoms.fill(from, bottoms.size(), Double.POSITIVE_INFINITY); + posLefts = bigArrays.resize(posLefts, tops.size()); + posLefts.fill(from, posLefts.size(), Double.POSITIVE_INFINITY); + posRights = bigArrays.resize(posRights, tops.size()); + posRights.fill(from, posRights.size(), Double.NEGATIVE_INFINITY); + negLefts = bigArrays.resize(negLefts, tops.size()); + negLefts.fill(from, negLefts.size(), Double.POSITIVE_INFINITY); + negRights = bigArrays.resize(negRights, tops.size()); + negRights.fill(from, negRights.size(), Double.NEGATIVE_INFINITY); + } + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBounds.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBounds.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBounds.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBounds.java index 380fbce85ada7..81ef502dda130 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBounds.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBounds.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.geo.GeoPoint; import org.opensearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilder.java similarity index 93% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilder.java index 64e27fa7e13d1..b2c441f9a951c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilder.java @@ -30,8 +30,9 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; +import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.ObjectParser; @@ -40,6 +41,7 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.metrics.GeoBoundsAggregatorSupplier; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -57,6 +59,7 @@ */ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder { public static final String NAME = "geo_bounds"; + private static final ParseField WRAP_LONGITUDE_FIELD = new ParseField("wrap_longitude"); public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>( NAME, GeoBoundsAggregatorSupplier.class @@ -68,7 +71,7 @@ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder< ); static { ValuesSourceAggregationBuilder.declareFields(PARSER, false, false, false); - PARSER.declareBoolean(GeoBoundsAggregationBuilder::wrapLongitude, GeoBoundsAggregator.WRAP_LONGITUDE_FIELD); + PARSER.declareBoolean(GeoBoundsAggregationBuilder::wrapLongitude, WRAP_LONGITUDE_FIELD); } public static void registerAggregators(ValuesSourceRegistry.Builder builder) { @@ -121,13 +124,6 @@ public GeoBoundsAggregationBuilder wrapLongitude(boolean wrapLongitude) { return this; } - /** - * Get whether to wrap longitudes. - */ - public boolean wrapLongitude() { - return wrapLongitude; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.NONE; @@ -145,7 +141,7 @@ protected GeoBoundsAggregatorFactory innerBuild( @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(GeoBoundsAggregator.WRAP_LONGITUDE_FIELD.getPreferredName(), wrapLongitude); + builder.field(WRAP_LONGITUDE_FIELD.getPreferredName(), wrapLongitude); return builder; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregator.java similarity index 51% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregator.java index 054e8d4cb1c6c..a6518ea702be6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregator.java @@ -30,17 +30,13 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; -import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.DoubleArray; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.search.aggregations.Aggregator; -import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.support.ValuesSource; @@ -51,22 +47,11 @@ import java.util.Map; /** - * Aggregate all docs into a geographic bounds + * Aggregate all docs into a geographic bounds for field GeoPoint. * * @opensearch.internal */ -final class GeoBoundsAggregator extends MetricsAggregator { - - static final ParseField WRAP_LONGITUDE_FIELD = new ParseField("wrap_longitude"); - - private final ValuesSource.GeoPoint valuesSource; - private final boolean wrapLongitude; - DoubleArray tops; - DoubleArray bottoms; - DoubleArray posLefts; - DoubleArray posRights; - DoubleArray negLefts; - DoubleArray negRights; +final class GeoBoundsAggregator extends AbstractGeoBoundsAggregator { GeoBoundsAggregator( String name, @@ -76,25 +61,7 @@ final class GeoBoundsAggregator extends MetricsAggregator { boolean wrapLongitude, Map metadata ) throws IOException { - super(name, aggregationContext, parent, metadata); - // TODO: stop expecting nulls here - this.valuesSource = valuesSourceConfig.hasValues() ? (ValuesSource.GeoPoint) valuesSourceConfig.getValuesSource() : null; - this.wrapLongitude = wrapLongitude; - if (valuesSource != null) { - final BigArrays bigArrays = context.bigArrays(); - tops = bigArrays.newDoubleArray(1, false); - tops.fill(0, tops.size(), Double.NEGATIVE_INFINITY); - bottoms = bigArrays.newDoubleArray(1, false); - bottoms.fill(0, bottoms.size(), Double.POSITIVE_INFINITY); - posLefts = bigArrays.newDoubleArray(1, false); - posLefts.fill(0, posLefts.size(), Double.POSITIVE_INFINITY); - posRights = bigArrays.newDoubleArray(1, false); - posRights.fill(0, posRights.size(), Double.NEGATIVE_INFINITY); - negLefts = bigArrays.newDoubleArray(1, false); - negLefts.fill(0, negLefts.size(), Double.POSITIVE_INFINITY); - negRights = bigArrays.newDoubleArray(1, false); - negRights.fill(0, negRights.size(), Double.NEGATIVE_INFINITY); - } + super(name, aggregationContext, parent, valuesSourceConfig, wrapLongitude, metadata); } @Override @@ -107,25 +74,10 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - if (bucket >= tops.size()) { - long from = tops.size(); - tops = bigArrays.grow(tops, bucket + 1); - tops.fill(from, tops.size(), Double.NEGATIVE_INFINITY); - bottoms = bigArrays.resize(bottoms, tops.size()); - bottoms.fill(from, bottoms.size(), Double.POSITIVE_INFINITY); - posLefts = bigArrays.resize(posLefts, tops.size()); - posLefts.fill(from, posLefts.size(), Double.POSITIVE_INFINITY); - posRights = bigArrays.resize(posRights, tops.size()); - posRights.fill(from, posRights.size(), Double.NEGATIVE_INFINITY); - negLefts = bigArrays.resize(negLefts, tops.size()); - negLefts.fill(from, negLefts.size(), Double.POSITIVE_INFINITY); - negRights = 
bigArrays.resize(negRights, tops.size()); - negRights.fill(from, negRights.size(), Double.NEGATIVE_INFINITY); - } + setBucketSize(bucket, bigArrays); if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); - for (int i = 0; i < valuesCount; ++i) { GeoPoint value = values.nextValue(); double top = tops.get(bucket); @@ -163,38 +115,4 @@ public void collect(int doc, long bucket) throws IOException { } }; } - - @Override - public InternalAggregation buildAggregation(long owningBucketOrdinal) { - if (valuesSource == null) { - return buildEmptyAggregation(); - } - double top = tops.get(owningBucketOrdinal); - double bottom = bottoms.get(owningBucketOrdinal); - double posLeft = posLefts.get(owningBucketOrdinal); - double posRight = posRights.get(owningBucketOrdinal); - double negLeft = negLefts.get(owningBucketOrdinal); - double negRight = negRights.get(owningBucketOrdinal); - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, metadata()); - } - - @Override - public InternalAggregation buildEmptyAggregation() { - return new InternalGeoBounds( - name, - Double.NEGATIVE_INFINITY, - Double.POSITIVE_INFINITY, - Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, - Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, - wrapLongitude, - metadata() - ); - } - - @Override - public void doClose() { - Releasables.close(tops, bottoms, posLefts, posRights, negLefts, negRights); - } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index 2c6b75842b6f5..149e052b4db7d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoBounds.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBounds.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoBounds.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBounds.java index 87018242ee8df..7c708de88a49c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoBounds.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBounds.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ParsedGeoBounds.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/ParsedGeoBounds.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/ParsedGeoBounds.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/ParsedGeoBounds.java index a482fcfdf08dd..7643ac9d9a010 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ParsedGeoBounds.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/ParsedGeoBounds.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java new file mode 100644 index 0000000000000..49b455bbf389e --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregationBuilderTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.metrics; + +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.aggregations.BaseAggregationTestCase; + +import java.util.Collection; +import java.util.Collections; + +public class GeoBoundsAggregationBuilderTests extends BaseAggregationTestCase { + + /** + * This registers the GeoShape mapper with the Tests so that it can be used for testing the aggregation builders + * + * @return A Collection containing {@link GeoModulePlugin} + */ + protected Collection> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } + + @Override + protected GeoBoundsAggregationBuilder createTestAggregatorBuilder() { + GeoBoundsAggregationBuilder factory = new GeoBoundsAggregationBuilder(randomAlphaOfLengthBetween(1, 20)); + String field = randomAlphaOfLengthBetween(3, 20); + factory.field(field); + if (randomBoolean()) { + factory.wrapLongitude(randomBoolean()); + } + if (randomBoolean()) { + factory.missing("0,0"); + } + return factory; + } + +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorTests.java similarity index 88% rename from server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorTests.java index 6440c62e58e18..ee7a3c7e3faa2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorTests.java @@ -30,26 +30,42 @@ * 
GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.apache.lucene.document.Document; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.tests.common.AggregationInspectionHelper; +import org.opensearch.geo.tests.common.RandomGeoGenerator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.AggregatorTestCase; -import org.opensearch.search.aggregations.support.AggregationInspectionHelper; -import org.opensearch.test.geo.RandomGeoGenerator; -import static org.opensearch.search.aggregations.metrics.InternalGeoBoundsTests.GEOHASH_TOLERANCE; +import java.util.Collections; +import java.util.List; + import static org.hamcrest.Matchers.closeTo; public class GeoBoundsAggregatorTests extends AggregatorTestCase { + public static final double GEOHASH_TOLERANCE = 1E-5D; + + /** + * Overriding the Search Plugins list with {@link GeoModulePlugin} so that the testcase will know that this plugin is + * to be loaded during the tests. + * @return List of {@link SearchPlugin} + */ + @Override + protected List getSearchPlugins() { + return Collections.singletonList(new GeoModulePlugin()); + } + public void testEmpty() throws Exception { try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { GeoBoundsAggregationBuilder aggBuilder = new GeoBoundsAggregationBuilder("my_agg").field("field").wrapLongitude(false); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalGeoBoundsTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBoundsTests.java similarity index 81% rename from server/src/test/java/org/opensearch/search/aggregations/metrics/InternalGeoBoundsTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBoundsTests.java index e3857efff5d4d..22915212ff415 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalGeoBoundsTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/InternalGeoBoundsTests.java @@ -30,11 +30,18 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; +import org.opensearch.common.ParseField; +import org.opensearch.common.xcontent.ContextParser; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.ParsedAggregation; import org.opensearch.test.InternalAggregationTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,6 +51,30 @@ public class InternalGeoBoundsTests extends InternalAggregationTestCase { static final double GEOHASH_TOLERANCE = 1E-5D; + /** + * Overriding the method so that tests can get the aggregation specs for namedWriteable. + * + * @return GeoPlugin + */ + @Override + protected SearchPlugin registerPlugin() { + return new GeoModulePlugin(); + } + + /** + * Overriding with the {@link ParsedGeoBounds} so that it can be parsed. We need to do this as {@link GeoModulePlugin} + * is registering this Aggregation. + * + * @return a List of {@link NamedXContentRegistry.Entry} + */ + @Override + protected List getNamedXContents() { + final List namedXContents = new ArrayList<>(getDefaultNamedXContents()); + final ContextParser parser = (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c); + namedXContents.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoBoundsAggregationBuilder.NAME), parser)); + return namedXContents; + } + @Override protected InternalGeoBounds createTestInstance(String name, Map metadata) { // we occasionally want to test top = Double.NEGATIVE_INFINITY since this triggers empty xContent object diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java new file mode 100644 index 0000000000000..c1f27b71c326d --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.tests.common; + +import org.opensearch.geo.search.aggregations.metrics.GeoBounds; +import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; + +public class AggregationBuilders { + /** + * Create a new {@link GeoBounds} aggregation with the given name. + */ + public static GeoBoundsAggregationBuilder geoBounds(String name) { + return new GeoBoundsAggregationBuilder(name); + } +} diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java new file mode 100644 index 0000000000000..208187bf34a5c --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.geo.tests.common; + +import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; + +public class AggregationInspectionHelper { + + public static boolean hasValue(InternalGeoBounds agg) { + return (agg.topLeft() == null && agg.bottomRight() == null) == false; + } +} diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java new file mode 100644 index 0000000000000..2cf32c36b97ec --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.tests.common; + +import org.opensearch.common.geo.GeoPoint; + +import java.util.Random; + +/** + * Random geo generation utilities for randomized {@code geo_point} type testing + * does not depend on jts or spatial4j. Use RandomShapeGenerator to create random OGC compliant shapes. + * This is a copy of the file present in the server folder. We need to keep both as there are tests which are + * dependent on that file. + */ +public class RandomGeoGenerator { + + public static void randomPoint(Random r, double[] pt) { + final double[] min = { -180, -90 }; + final double[] max = { 180, 90 }; + randomPointIn(r, min[0], min[1], max[0], max[1], pt); + } + + public static void randomPointIn( + Random r, + final double minLon, + final double minLat, + final double maxLon, + final double maxLat, + double[] pt + ) { + assert pt != null && pt.length == 2; + + // normalize min and max + double[] min = { normalizeLongitude(minLon), normalizeLatitude(minLat) }; + double[] max = { normalizeLongitude(maxLon), normalizeLatitude(maxLat) }; + final double[] tMin = new double[2]; + final double[] tMax = new double[2]; + tMin[0] = Math.min(min[0], max[0]); + tMax[0] = Math.max(min[0], max[0]); + tMin[1] = Math.min(min[1], max[1]); + tMax[1] = Math.max(min[1], max[1]); + + pt[0] = tMin[0] + r.nextDouble() * (tMax[0] - tMin[0]); + pt[1] = tMin[1] + r.nextDouble() * (tMax[1] - tMin[1]); + } + + public static GeoPoint randomPoint(Random r) { + return randomPointIn(r, -180, -90, 180, 90); + } + + public static GeoPoint randomPointIn(Random r, final double minLon, final double minLat, final double maxLon, final double maxLat) { + double[] pt = new double[2]; + randomPointIn(r, minLon, minLat, maxLon, maxLat, pt); + return new GeoPoint(pt[1], pt[0]); + } + + /** Puts latitude in range of -90 to 90. */ + private static double normalizeLatitude(double latitude) { + if (latitude >= -90 && latitude <= 90) { + return latitude; // common case, and avoids slight double precision shifting + } + double off = Math.abs((latitude + 90) % 360); + return (off <= 180 ? off : 360 - off) - 90; + } + + /** Puts longitude in range of -180 to +180. 
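+     * A few illustrative values (not asserted anywhere in this patch):
+     * normalizeLongitude(190) == -170, normalizeLongitude(-190) == 170,
+     * and normalizeLongitude(180) == 180 (a positive input keeps the +180 edge).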
*/ + private static double normalizeLongitude(double longitude) { + if (longitude >= -180 && longitude <= 180) { + return longitude; // common case, and avoids slight double precision shifting + } + double off = (longitude + 180) % 360; + if (off < 0) { + return 180 + off; + } else if (off == 0 && longitude > 0) { + return 180; + } else { + return -180 + off; + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java index 7d3f06760882d..26bfe59618275 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java @@ -39,7 +39,6 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.opensearch.search.aggregations.metrics.Cardinality; -import org.opensearch.search.aggregations.metrics.GeoBounds; import org.opensearch.search.aggregations.metrics.GeoCentroid; import org.opensearch.search.aggregations.metrics.Percentiles; import org.opensearch.search.aggregations.metrics.Stats; @@ -47,7 +46,6 @@ import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; -import static org.opensearch.search.aggregations.AggregationBuilders.geoBounds; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.percentiles; @@ -213,28 +211,6 @@ public void testStats() { assertEquals(4, stats.getAvg(), 0); } - public void testUnmappedGeoBounds() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")) - .get(); - assertSearchResponse(response); - GeoBounds bounds = response.getAggregations().get("bounds"); - assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5)); - assertThat(bounds.bottomRight().lon(), closeTo(1.0, 1E-5)); - assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); - assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); - } - - public void testGeoBounds() { - SearchResponse response = client().prepareSearch("idx").addAggregation(geoBounds("bounds").field("location").missing("2,1")).get(); - assertSearchResponse(response); - GeoBounds bounds = response.getAggregations().get("bounds"); - assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5)); - assertThat(bounds.bottomRight().lon(), closeTo(2.0, 1E-5)); - assertThat(bounds.topLeft().lat(), closeTo(2.0, 1E-5)); - assertThat(bounds.topLeft().lon(), closeTo(1.0, 1E-5)); - } - public void testGeoCentroid() { SearchResponse response = client().prepareSearch("idx") .addAggregation(geoCentroid("centroid").field("location").missing("2,1")) diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 0995497eba50e..80e025a3651a8 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -185,12 +185,10 @@ import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; import 
org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.InternalAvg; import org.opensearch.search.aggregations.metrics.InternalCardinality; import org.opensearch.search.aggregations.metrics.InternalExtendedStats; -import org.opensearch.search.aggregations.metrics.InternalGeoBounds; import org.opensearch.search.aggregations.metrics.InternalGeoCentroid; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -664,12 +662,6 @@ private ValuesSourceRegistry registerAggregations(List plugins) { .addResultReader(InternalTopHits::new), builder ); - registerAggregation( - new AggregationSpec(GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new, GeoBoundsAggregationBuilder.PARSER) - .addResultReader(InternalGeoBounds::new) - .setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators), - builder - ); registerAggregation( new AggregationSpec( GeoCentroidAggregationBuilder.NAME, diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java index 01e0f95b0d750..382455093309d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java @@ -78,8 +78,6 @@ import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBounds; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.search.aggregations.metrics.GeoCentroid; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.Max; @@ -364,13 +362,6 @@ public static TopHitsAggregationBuilder topHits(String name) { return new TopHitsAggregationBuilder(name); } - /** - * Create a new {@link GeoBounds} aggregation with the given name. - */ - public static GeoBoundsAggregationBuilder geoBounds(String name) { - return new GeoBoundsAggregationBuilder(name); - } - /** * Create a new {@link GeoCentroid} aggregation with the given name. 
*/ diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index 8d036503d1330..f36c4620d5b33 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -54,7 +54,6 @@ import org.opensearch.search.aggregations.metrics.InternalAvg; import org.opensearch.search.aggregations.metrics.InternalCardinality; import org.opensearch.search.aggregations.metrics.InternalExtendedStats; -import org.opensearch.search.aggregations.metrics.InternalGeoBounds; import org.opensearch.search.aggregations.metrics.InternalGeoCentroid; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -191,10 +190,6 @@ public static boolean hasValue(InternalExtendedStats agg) { return agg.getCount() > 0; } - public static boolean hasValue(InternalGeoBounds agg) { - return (agg.topLeft() == null && agg.bottomRight() == null) == false; - } - public static boolean hasValue(InternalGeoCentroid agg) { return agg.centroid() != null && agg.count() > 0; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index 421865013a28c..111ce23f8a0cb 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -80,7 +80,6 @@ import org.opensearch.search.aggregations.metrics.InternalSumTests; import org.opensearch.search.aggregations.metrics.InternalAvgTests; import org.opensearch.search.aggregations.metrics.InternalCardinalityTests; -import org.opensearch.search.aggregations.metrics.InternalGeoBoundsTests; import org.opensearch.search.aggregations.metrics.InternalGeoCentroidTests; import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesRanksTests; import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesTests; @@ -142,7 +141,6 @@ private static List> getAggsTests() { aggsTests.add(new InternalStatsBucketTests()); aggsTests.add(new InternalExtendedStatsTests()); aggsTests.add(new InternalExtendedStatsBucketTests()); - aggsTests.add(new InternalGeoBoundsTests()); aggsTests.add(new InternalGeoCentroidTests()); aggsTests.add(new InternalHistogramTests()); aggsTests.add(new InternalDateHistogramTests()); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java deleted file mode 100644 index e132426680fc8..0000000000000 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoBoundsTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.search.aggregations.metrics; - -import org.opensearch.search.aggregations.BaseAggregationTestCase; - -public class GeoBoundsTests extends BaseAggregationTestCase { - - @Override - protected GeoBoundsAggregationBuilder createTestAggregatorBuilder() { - GeoBoundsAggregationBuilder factory = new GeoBoundsAggregationBuilder(randomAlphaOfLengthBetween(1, 20)); - String field = randomAlphaOfLengthBetween(3, 20); - factory.field(field); - if (randomBoolean()) { - factory.wrapLongitude(randomBoolean()); - } - if (randomBoolean()) { - factory.missing("0,0"); - } - return factory; - } - -} diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java index f138de152a488..a4099d66de28e 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java @@ -117,7 +117,6 @@ import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.opensearch.search.aggregations.metrics.InternalHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -129,7 +128,6 @@ import org.opensearch.search.aggregations.metrics.ParsedAvg; import org.opensearch.search.aggregations.metrics.ParsedCardinality; import org.opensearch.search.aggregations.metrics.ParsedExtendedStats; -import org.opensearch.search.aggregations.metrics.ParsedGeoBounds; import org.opensearch.search.aggregations.metrics.ParsedGeoCentroid; import org.opensearch.search.aggregations.metrics.ParsedHDRPercentileRanks; import org.opensearch.search.aggregations.metrics.ParsedHDRPercentiles; @@ -261,7 +259,6 @@ public ReduceContext forFinalReduction() { map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c)); map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c)); - map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c)); map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> 
ParsedDateHistogram.fromXContent(p, (String) c)); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 8d8df2fec39f9..1ab7785b17f5e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2097,6 +2097,7 @@ protected Collection> getMockPlugins() { if (addMockGeoShapeFieldMapper()) { mocks.add(TestGeoShapeFieldMapperPlugin.class); } + return Collections.unmodifiableList(mocks); } From 1c3f034dfda74f657289eab9aba3d9242fd08470 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Wed, 10 Aug 2022 18:12:19 +0530 Subject: [PATCH 077/104] [Remote Store] Add validator that forces segment replication type before enabling remote store (#4175) * Add validator that forces segment replication type before enabling remote store Signed-off-by: Sachin Kale --- .../cluster/metadata/IndexMetadata.java | 26 +++++++++++++++++ .../opensearch/index/IndexSettingsTests.java | 28 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 6e39809ac1a34..7210de8c70f65 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -292,6 +292,32 @@ public Iterator> settings() { public static final Setting INDEX_REMOTE_STORE_ENABLED_SETTING = Setting.boolSetting( SETTING_REMOTE_STORE_ENABLED, false, + new Setting.Validator<>() { + + @Override + public void validate(final Boolean value) {} + + @Override + public void validate(final Boolean value, final Map, Object> settings) { + final Object replicationType = settings.get(INDEX_REPLICATION_TYPE_SETTING); + if (replicationType != ReplicationType.SEGMENT && value == true) { + throw new IllegalArgumentException( + "Settings " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + " cannot be enabled when " + + INDEX_REPLICATION_TYPE_SETTING.getKey() + + " is set to " + + settings.get(INDEX_REPLICATION_TYPE_SETTING) + ); + } + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(INDEX_REPLICATION_TYPE_SETTING); + return settings.iterator(); + } + }, Property.IndexScope, Property.Final ); diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 90aa36253fb7f..5f969180e6b2a 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -42,6 +42,7 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -854,4 +855,31 @@ public void testEnablingRemoteTranslogStoreFailsWhenRemoteSegmentDisabled() { iae.getMessage() ); } + + public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDocument() { + Settings indexSettings = Settings.builder() + .put("index.replication.type", ReplicationType.DOCUMENT) + .put("index.remote_store.enabled", true) + .build(); + IllegalArgumentException iae = expectThrows( + 
IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals( + "Settings index.remote_store.enabled cannot be enabled when index.replication.type is set to DOCUMENT", + iae.getMessage() + ); + } + + public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() { + Settings indexSettings = Settings.builder().put("index.remote_store.enabled", true).build(); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings) + ); + assertEquals( + "Settings index.remote_store.enabled cannot be enabled when index.replication.type is set to DOCUMENT", + iae.getMessage() + ); + } } From 7dad0635d7216c2d4440cf9b8ecb703602d3a322 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 10 Aug 2022 10:07:13 -0700 Subject: [PATCH 078/104] [Segment Replication] Fix OngoingSegmentReplications to key by allocation ID instead of DiscoveryNode. (#4182) * Fix OngoingSegmentReplications to key by allocation ID instead of DiscoveryNode. This change fixes segrep to work with multiple shards per node by keying ongoing replications on allocation ID. This also updates cancel methods to ensure state is properly cleared on shard cancel. Signed-off-by: Marc Handalian * Clean up cancel methods. Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationIT.java | 66 ++++++++++++++-- .../OngoingSegmentReplications.java | 79 ++++++++++++------- .../SegmentReplicationSourceHandler.java | 15 +++- .../OngoingSegmentReplicationsTests.java | 42 +++++++++- .../SegmentReplicationSourceHandlerTests.java | 4 + 5 files changed, 166 insertions(+), 40 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index a1cc0148dcdac..dae2fa04a3a7e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -111,6 +111,54 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { } } + public void testMultipleShards() throws Exception { + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final String nodeA = internalCluster().startNode(); + final String nodeB = internalCluster().startNode(); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + + final int initialDocCount = scaledRandomIntBetween(1, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + final int additionalDocCount = scaledRandomIntBetween(0, 
200); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + ensureGreen(INDEX_NAME); + assertSegmentStats(REPLICA_COUNT); + } + } + public void testReplicationAfterForceMerge() throws Exception { final String nodeA = internalCluster().startNode(); final String nodeB = internalCluster().startNode(); @@ -262,15 +310,17 @@ private void waitForReplicaUpdate() throws Exception { final Map> segmentListMap = segmentsByShardType(replicationGroupSegments); final List primaryShardSegmentsList = segmentListMap.get(true); final List replicaShardSegments = segmentListMap.get(false); - + // if we don't have any segments yet, proceed. final ShardSegments primaryShardSegments = primaryShardSegmentsList.stream().findFirst().get(); - final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); - final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); - for (ShardSegments shardSegments : replicaShardSegments) { - final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() - .stream() - .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); - assertTrue(isReplicaCaughtUpToPrimary); + if (primaryShardSegments.getSegments().isEmpty() == false) { + final Map latestPrimarySegments = getLatestSegments(primaryShardSegments); + final Long latestPrimaryGen = latestPrimarySegments.values().stream().findFirst().map(Segment::getGeneration).get(); + for (ShardSegments shardSegments : replicaShardSegments) { + final boolean isReplicaCaughtUpToPrimary = shardSegments.getSegments() + .stream() + .anyMatch(segment -> segment.getGeneration() == latestPrimaryGen); + assertTrue(isReplicaCaughtUpToPrimary); + } } } }); diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index a9b032c98b70f..dfebe5f7cabf2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -24,7 +24,10 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.function.Predicate; +import java.util.stream.Collectors; /** * Manages references to ongoing segrep events on a node. @@ -38,7 +41,7 @@ class OngoingSegmentReplications { private final RecoverySettings recoverySettings; private final IndicesService indicesService; private final Map copyStateMap; - private final Map nodesToHandlers; + private final Map allocationIdToHandlers; /** * Constructor. 
@@ -50,7 +53,7 @@ class OngoingSegmentReplications { this.indicesService = indicesService; this.recoverySettings = recoverySettings; this.copyStateMap = Collections.synchronizedMap(new HashMap<>()); - this.nodesToHandlers = ConcurrentCollections.newConcurrentMap(); + this.allocationIdToHandlers = ConcurrentCollections.newConcurrentMap(); } /** @@ -96,8 +99,7 @@ synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) thro * @param listener {@link ActionListener} that resolves when sending files is complete. */ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener listener) { - final DiscoveryNode node = request.getTargetNode(); - final SegmentReplicationSourceHandler handler = nodesToHandlers.get(node); + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.get(request.getTargetAllocationId()); if (handler != null) { if (handler.isReplicating()) { throw new OpenSearchException( @@ -108,7 +110,7 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { - final SegmentReplicationSourceHandler sourceHandler = nodesToHandlers.remove(node); + final SegmentReplicationSourceHandler sourceHandler = allocationIdToHandlers.remove(request.getTargetAllocationId()); if (sourceHandler != null) { removeCopyState(sourceHandler.getCopyState()); } @@ -123,19 +125,6 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener handler.getCopyState().getShard().shardId().equals(shard.shardId()), reason); + } + + /** + * Cancel any ongoing replications for a given {@link DiscoveryNode} + * + * @param node {@link DiscoveryNode} node for which to cancel replication events. + */ + void cancelReplication(DiscoveryNode node) { + cancelHandlers(handler -> handler.getTargetNode().equals(node), "Node left"); + } /** @@ -186,19 +180,25 @@ boolean isInCopyStateMap(ReplicationCheckpoint replicationCheckpoint) { } int size() { - return nodesToHandlers.size(); + return allocationIdToHandlers.size(); } int cachedCopyStateSize() { return copyStateMap.size(); } - private SegmentReplicationSourceHandler createTargetHandler(DiscoveryNode node, CopyState copyState, FileChunkWriter fileChunkWriter) { + private SegmentReplicationSourceHandler createTargetHandler( + DiscoveryNode node, + CopyState copyState, + String allocationId, + FileChunkWriter fileChunkWriter + ) { return new SegmentReplicationSourceHandler( node, fileChunkWriter, copyState.getShard().getThreadPool(), copyState, + allocationId, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), recoverySettings.getMaxConcurrentFileChunks() ); @@ -231,4 +231,23 @@ private synchronized void removeCopyState(CopyState copyState) { copyStateMap.remove(copyState.getRequestedReplicationCheckpoint()); } } + + /** + * Remove handlers from allocationIdToHandlers map based on a filter predicate. + * This will also decref the handler's CopyState reference. 
+ */ + private void cancelHandlers(Predicate predicate, String reason) { + final List allocationIds = allocationIdToHandlers.values() + .stream() + .filter(predicate) + .map(SegmentReplicationSourceHandler::getAllocationId) + .collect(Collectors.toList()); + for (String allocationId : allocationIds) { + final SegmentReplicationSourceHandler handler = allocationIdToHandlers.remove(allocationId); + if (handler != null) { + handler.cancel(reason); + removeCopyState(handler.getCopyState()); + } + } + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 8911302a722f5..46bad7951a2e1 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -54,6 +54,8 @@ class SegmentReplicationSourceHandler { private final List resources = new CopyOnWriteArrayList<>(); private final Logger logger; private final AtomicBoolean isReplicating = new AtomicBoolean(); + private final DiscoveryNode targetNode; + private final String allocationId; /** * Constructor. @@ -70,9 +72,11 @@ class SegmentReplicationSourceHandler { FileChunkWriter writer, ThreadPool threadPool, CopyState copyState, + String allocationId, int fileChunkSizeInBytes, int maxConcurrentFileChunks ) { + this.targetNode = targetNode; this.shard = copyState.getShard(); this.logger = Loggers.getLogger( SegmentReplicationSourceHandler.class, @@ -89,6 +93,7 @@ class SegmentReplicationSourceHandler { fileChunkSizeInBytes, maxConcurrentFileChunks ); + this.allocationId = allocationId; this.copyState = copyState; } @@ -118,7 +123,7 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene logger.debug( "delaying replication of {} as it is not listed as assigned to target node {}", shard.shardId(), - request.getTargetNode() + targetNode ); throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); } @@ -175,4 +180,12 @@ CopyState getCopyState() { public boolean isReplicating() { return isReplicating.get(); } + + public DiscoveryNode getTargetNode() { + return targetNode; + } + + public String getAllocationId() { + return allocationId; + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index d42e75871a45a..38c55620e1223 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -155,6 +155,9 @@ public void testCancelReplication() throws IOException { } public void testMultipleReplicasUseSameCheckpoint() throws IOException { + IndexShard secondReplica = newShard(primary.shardId(), false); + recoverReplica(secondReplica, primary, true); + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); final CheckpointInfoRequest request = new CheckpointInfoRequest( 1L, @@ -172,7 +175,7 @@ public void testMultipleReplicasUseSameCheckpoint() throws IOException { final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest( 1L, - replica.routingEntry().allocationId().getId(), + 
secondReplica.routingEntry().allocationId().getId(), replicaDiscoveryNode, testCheckpoint ); @@ -187,6 +190,7 @@ public void testMultipleReplicasUseSameCheckpoint() throws IOException { assertEquals(0, copyState.refCount()); assertEquals(0, replications.size()); assertEquals(0, replications.cachedCopyStateSize()); + closeShards(secondReplica); } public void testStartCopyWithoutPrepareStep() { @@ -272,4 +276,40 @@ public void onFailure(Exception e) { } }); } + + public void testCancelAllReplicationsForShard() throws IOException { + // This tests when primary has multiple ongoing replications. + IndexShard replica_2 = newShard(primary.shardId(), false); + recoverReplica(replica_2, primary, true); + + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + primaryDiscoveryNode, + testCheckpoint + ); + + final CopyState copyState = replications.prepareForReplication(request, mock(FileChunkWriter.class)); + assertEquals(1, copyState.refCount()); + + final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest( + 1L, + replica_2.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + testCheckpoint + ); + replications.prepareForReplication(secondRequest, mock(FileChunkWriter.class)); + + assertEquals(2, copyState.refCount()); + assertEquals(2, replications.size()); + assertEquals(1, replications.cachedCopyStateSize()); + + // cancel the primary's ongoing replications. + replications.cancel(primary, "Test"); + assertEquals(0, copyState.refCount()); + assertEquals(0, replications.size()); + assertEquals(0, replications.cachedCopyStateSize()); + closeShards(replica_2); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index 535cd5974490f..2c52772649acc 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -66,6 +66,7 @@ public void testSendFiles() throws IOException { chunkWriter, threadPool, copyState, + primary.routingEntry().allocationId().getId(), 5000, 1 ); @@ -103,6 +104,7 @@ public void testSendFiles_emptyRequest() throws IOException { chunkWriter, threadPool, copyState, + primary.routingEntry().allocationId().getId(), 5000, 1 ); @@ -141,6 +143,7 @@ public void testSendFileFails() throws IOException { chunkWriter, threadPool, copyState, + primary.routingEntry().allocationId().getId(), 5000, 1 ); @@ -178,6 +181,7 @@ public void testReplicationAlreadyRunning() throws IOException { chunkWriter, threadPool, copyState, + primary.routingEntry().allocationId().getId(), 5000, 1 ); From 8ccedd89d979eaaaec45523b409d61d3e4806c51 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 12 Aug 2022 18:58:51 +0530 Subject: [PATCH 079/104] [Remote Store] Change remote_store setting validation message to make it more clear (#4199) * Change remote_store setting validation message to make it more clear Signed-off-by: Sachin Kale --- .../org/opensearch/cluster/metadata/IndexMetadata.java | 8 ++++---- .../java/org/opensearch/index/IndexSettingsTests.java | 10 ++-------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git 
a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index 7210de8c70f65..759891e88039b 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -302,12 +302,12 @@ public void validate(final Boolean value, final Map<Setting<?>, Object> settings
             final Object replicationType = settings.get(INDEX_REPLICATION_TYPE_SETTING);
             if (replicationType != ReplicationType.SEGMENT && value == true) {
                 throw new IllegalArgumentException(
-                    "Settings "
+                    "To enable "
                         + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey()
-                        + " cannot be enabled when "
+                        + ", "
                         + INDEX_REPLICATION_TYPE_SETTING.getKey()
-                        + " is set to "
-                        + settings.get(INDEX_REPLICATION_TYPE_SETTING)
+                        + " should be set to "
+                        + ReplicationType.SEGMENT
                 );
             }
         }
diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
index 5f969180e6b2a..e02eac85beafb 100644
--- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
@@ -865,10 +865,7 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDocument() {
             IllegalArgumentException.class,
             () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings)
         );
-        assertEquals(
-            "Settings index.remote_store.enabled cannot be enabled when index.replication.type is set to DOCUMENT",
-            iae.getMessage()
-        );
+        assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage());
     }

     public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() {
@@ -877,9 +874,6 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() {
             IllegalArgumentException.class,
             () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings)
         );
-        assertEquals(
-            "Settings index.remote_store.enabled cannot be enabled when index.replication.type is set to DOCUMENT",
-            iae.getMessage()
-        );
+        assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage());
     }
 }
From b9a64cbfeb4da0e62367a344aa629cc59c953eff Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 12 Aug 2022 10:08:34 -0700
Subject: [PATCH 080/104] [Bug] [Segment Replication] Update store metadata
 recovery diff logic to ignore missing files causing exception (#4185)

* Update Store for segment replication diff

Signed-off-by: Poojita Raj

Signed-off-by: Poojita Raj

* Update recoveryDiff logic to ignore missing files causing exception on replica during copy

Signed-off-by: Suraj Singh

* Address review comments

Signed-off-by: Suraj Singh

Signed-off-by: Poojita Raj
Signed-off-by: Suraj Singh
Co-authored-by: Poojita Raj
---
 .../replication/SegmentReplicationIT.java     |  52 +++++++
 .../org/opensearch/index/store/Store.java     |  88 ++++++++---
 .../replication/SegmentReplicationTarget.java |   2 +-
 .../SegmentReplicationTargetTests.java        | 138 +++++++++++++++++-
 4 files changed, 260 insertions(+), 20 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index dae2fa04a3a7e..96ac703b9837e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++
b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -15,6 +15,7 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.ShardSegments; +import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -36,6 +37,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -247,6 +249,56 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { assertSegmentStats(REPLICA_COUNT); } + public void testDeleteOperations() throws Exception { + final String nodeA = internalCluster().startNode(); + final String nodeB = internalCluster().startNode(); + + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + final int initialDocCount = scaledRandomIntBetween(0, 200); + try ( + BackgroundIndexer indexer = new BackgroundIndexer( + INDEX_NAME, + "_doc", + client(), + -1, + RandomizedTest.scaledRandomIntBetween(2, 5), + false, + random() + ) + ) { + indexer.start(initialDocCount); + waitForDocs(initialDocCount, indexer); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + // wait a short amount of time to give replication a chance to complete. + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount); + + final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int expectedHitCount = initialDocCount + additionalDocCount; + indexer.start(additionalDocCount); + waitForDocs(expectedHitCount, indexer); + waitForReplicaUpdate(); + + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount); + + ensureGreen(INDEX_NAME); + + Set ids = indexer.getIds(); + String id = ids.toArray()[0].toString(); + client(nodeA).prepareDelete(INDEX_NAME, id).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + refresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount - 1); + assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount - 1); + } + } + private void assertSegmentStats(int numberOfReplicas) throws IOException { final IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().segments(new IndicesSegmentsRequest()).actionGet(); diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 6828ab7d91b2c..163717ad94c2c 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -110,8 +110,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Optional; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.locks.ReentrantReadWriteLock; @@ -1102,6 +1102,30 @@ public Map asMap() { private static final String LIV_FILE_EXTENSION = "liv"; // lucene 5 delete file private static final String SEGMENT_INFO_EXTENSION = "si"; + /** + * Helper method used to group store files according to segment and commit. + * + * @see MetadataSnapshot#recoveryDiff(MetadataSnapshot) + * @see MetadataSnapshot#segmentReplicationDiff(MetadataSnapshot) + */ + private Iterable> getGroupedFilesIterable() { + final Map> perSegment = new HashMap<>(); + final List perCommitStoreFiles = new ArrayList<>(); + for (StoreFileMetadata meta : this) { + final String segmentId = IndexFileNames.parseSegmentName(meta.name()); + final String extension = IndexFileNames.getExtension(meta.name()); + if (IndexFileNames.SEGMENTS.equals(segmentId) + || DEL_FILE_EXTENSION.equals(extension) + || LIV_FILE_EXTENSION.equals(extension)) { + // only treat del files as per-commit files fnm files are generational but only for upgradable DV + perCommitStoreFiles.add(meta); + } else { + perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(meta); + } + } + return Iterables.concat(perSegment.values(), Collections.singleton(perCommitStoreFiles)); + } + /** * Returns a diff between the two snapshots that can be used for recovery. The given snapshot is treated as the * recovery target and this snapshot as the source. The returned diff will hold a list of files that are: @@ -1139,23 +1163,8 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { final List identical = new ArrayList<>(); final List different = new ArrayList<>(); final List missing = new ArrayList<>(); - final Map> perSegment = new HashMap<>(); - final List perCommitStoreFiles = new ArrayList<>(); - - for (StoreFileMetadata meta : this) { - final String segmentId = IndexFileNames.parseSegmentName(meta.name()); - final String extension = IndexFileNames.getExtension(meta.name()); - if (IndexFileNames.SEGMENTS.equals(segmentId) - || DEL_FILE_EXTENSION.equals(extension) - || LIV_FILE_EXTENSION.equals(extension)) { - // only treat del files as per-commit files fnm files are generational but only for upgradable DV - perCommitStoreFiles.add(meta); - } else { - perSegment.computeIfAbsent(segmentId, k -> new ArrayList<>()).add(meta); - } - } final ArrayList identicalFiles = new ArrayList<>(); - for (List segmentFiles : Iterables.concat(perSegment.values(), Collections.singleton(perCommitStoreFiles))) { + for (List segmentFiles : getGroupedFilesIterable()) { identicalFiles.clear(); boolean consistent = true; for (StoreFileMetadata meta : segmentFiles) { @@ -1190,6 +1199,51 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { return recoveryDiff; } + /** + * Segment Replication method + * Returns a diff between the two snapshots that can be used for getting list of files to copy over to a replica for segment replication. The given snapshot is treated as the + * target and this snapshot as the source. The returned diff will hold a list of files that are: + *

+     * <ul>
+     * <li>identical: they exist in both snapshots and they can be considered the same, i.e. they don't need to be recovered</li>
+     * <li>different: they exist in both snapshots but they are not identical</li>
+     * <li>missing: files that exist in the source but not in the target</li>
+     * </ul>
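+     * <p>
+     * A hypothetical example of the contract (file names are illustrative only): if the target already holds
+     * identical copies of _0.cfe and _0.cfs but is missing _0.si, segment _0 is still treated as consistent,
+     * so the result is identical = [_0.cfe, _0.cfs], different = [], missing = [_0.si]. Under recoveryDiff the
+     * missing file would mark the whole segment inconsistent and push _0.cfe and _0.cfs into different as well.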
+ */ + public RecoveryDiff segmentReplicationDiff(MetadataSnapshot recoveryTargetSnapshot) { + final List identical = new ArrayList<>(); + final List different = new ArrayList<>(); + final List missing = new ArrayList<>(); + final ArrayList identicalFiles = new ArrayList<>(); + for (List segmentFiles : getGroupedFilesIterable()) { + identicalFiles.clear(); + boolean consistent = true; + for (StoreFileMetadata meta : segmentFiles) { + StoreFileMetadata storeFileMetadata = recoveryTargetSnapshot.get(meta.name()); + if (storeFileMetadata == null) { + // Do not consider missing files as inconsistent in SegRep as replicas may lag while primary updates + // documents and generate new files specific to a segment + missing.add(meta); + } else if (storeFileMetadata.isSame(meta) == false) { + consistent = false; + different.add(meta); + } else { + identicalFiles.add(meta); + } + } + if (consistent) { + identical.addAll(identicalFiles); + } else { + different.addAll(identicalFiles); + } + } + RecoveryDiff recoveryDiff = new RecoveryDiff( + Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), + Collections.unmodifiableList(missing) + ); + return recoveryDiff; + } + /** * Returns the number of files in this snapshot */ diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 516cfa91a787b..73d9a2f805d75 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -154,7 +154,7 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener storeMetadataSnapshots = generateStoreMetadataSnapshot(docCount); + + SegmentReplicationSource segrepSource = new SegmentReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onResponse( + new CheckpointInfoResponse(checkpoint, storeMetadataSnapshots.get(1), buffer.toArrayCopy(), Set.of(PENDING_DELETE_FILE)) + ); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); + } + }; + SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( + SegmentReplicationTargetService.SegmentReplicationListener.class + ); + + segrepTarget = spy(new SegmentReplicationTarget(repCheckpoint, indexShard, segrepSource, segRepListener)); + when(segrepTarget.getMetadataSnapshot()).thenReturn(storeMetadataSnapshots.get(0)); + segrepTarget.startReplication(new ActionListener() { + @Override + public void onResponse(Void replicationResponse) { + logger.info("No error processing checkpoint info"); + } + + @Override + public void onFailure(Exception e) { + assert (e instanceof IllegalStateException); + } + }); + } + + /** + * Generates a list of Store.MetadataSnapshot with two elements where second snapshot has extra files due to delete + * operation. A list of snapshots is returned so that identical files have same checksum. 
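+     * A sketch of the expected contents (hypothetical file names): with compound files enabled the first snapshot
+     * holds roughly {_0.cfs, _0.cfe, _0.si, segments_N}, while the second keeps the same _0.* files (hence the
+     * identical checksums) and adds the .liv live-docs file plus a new segments file produced by the delete commit.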
+ * @param docCount + * @return + * @throws IOException + */ + List generateStoreMetadataSnapshot(int docCount) throws IOException { + List docList = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + Document document = new Document(); + String text = new String(new char[] { (char) (97 + i), (char) (97 + i) }); + document.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + document.add(new TextField("str", text, Field.Store.YES)); + docList.add(document); + } + long seed = random().nextLong(); + Random random = new Random(seed); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + iwc.setUseCompoundFile(true); + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); + IndexWriter writer = new IndexWriter(store.directory(), iwc); + for (Document d : docList) { + writer.addDocument(d); + } + writer.commit(); + Store.MetadataSnapshot storeMetadata = store.getMetadata(); + // Delete one document to generate .liv file + writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(docCount)))); + writer.commit(); + Store.MetadataSnapshot storeMetadataWithDeletes = store.getMetadata(); + deleteContent(store.directory()); + writer.close(); + store.close(); + return Arrays.asList(storeMetadata, storeMetadataWithDeletes); + } + + public static void deleteContent(Directory directory) throws IOException { + final String[] files = directory.listAll(); + final List exceptions = new ArrayList<>(); + for (String file : files) { + try { + directory.deleteFile(file); + } catch (NoSuchFileException | FileNotFoundException e) { + // ignore + } catch (IOException e) { + exceptions.add(e); + } + } + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + @Override public void tearDown() throws Exception { super.tearDown(); From 5f2e66bfb11ebaea536ee62bc3764a848a6ab330 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Sat, 13 Aug 2022 22:48:16 +0530 Subject: [PATCH 081/104] [Remote Store] Add RemoteSegmentStoreDirectory to interact with remote segment store (#4020) * Add RemoteSegmentStoreDirectory to interact with remote segment store Signed-off-by: Sachin Kale --- .../index/store/RemoteDirectory.java | 12 +- .../store/RemoteSegmentStoreDirectory.java | 372 ++++++++++++++++++ .../index/store/RemoteDirectoryTests.java | 20 + .../RemoteSegmentStoreDirectoryTests.java | 339 ++++++++++++++++ 4 files changed, 742 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 855457f275122..62e2b12896411 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -33,7 +33,7 @@ * * @opensearch.internal */ -public final class RemoteDirectory extends Directory { +public class RemoteDirectory extends Directory { private final BlobContainer blobContainer; @@ -50,6 +50,16 @@ public String[] listAll() throws IOException { return blobContainer.listBlobs().keySet().stream().sorted().toArray(String[]::new); } + /** + * 
Returns names of files with given prefix in this directory. + * @param filenamePrefix The prefix to match against file names in the directory + * @return A list of the matching filenames in the directory + * @throws IOException if there were any failures in reading from the blob container + */ + public Collection listFilesByPrefix(String filenamePrefix) throws IOException { + return blobContainer.listBlobsByPrefix(filenamePrefix).keySet(); + } + /** * Removes an existing file in the directory. * diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java new file mode 100644 index 0000000000000..d7d6b29d08bfc --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -0,0 +1,372 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.opensearch.common.UUIDs; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * A RemoteDirectory extension for remote segment store. We need to make sure we don't overwrite a segment file once uploaded. + * In order to prevent segment overwrite which can occur due to two primary nodes for the same shard at the same time, + * a unique suffix is added to the uploaded segment file. This class keeps track of filename of segments stored + * in remote segment store vs filename in local filesystem and provides the consistent Directory interface so that + * caller will be accessing segment files in the same way as {@code FSDirectory}. Apart from storing actual segment files, + * remote segment store also keeps track of refresh checkpoints as metadata in a separate path which is handled by + * another instance of {@code RemoteDirectory}. + * @opensearch.internal + */ +public final class RemoteSegmentStoreDirectory extends FilterDirectory { + /** + * Each segment file is uploaded with unique suffix. 
+ * For example, _0.cfe in local filesystem will be uploaded to remote segment store as _0.cfe__gX7bNIIBrs0AUNsR2yEG + */ + public static final String SEGMENT_NAME_UUID_SEPARATOR = "__"; + + public static final MetadataFilenameUtils.MetadataFilenameComparator METADATA_FILENAME_COMPARATOR = + new MetadataFilenameUtils.MetadataFilenameComparator(); + + /** + * remoteDataDirectory is used to store segment files at path: cluster_UUID/index_UUID/shardId/segments/data + */ + private final RemoteDirectory remoteDataDirectory; + /** + * remoteMetadataDirectory is used to store metadata files at path: cluster_UUID/index_UUID/shardId/segments/metadata + */ + private final RemoteDirectory remoteMetadataDirectory; + + /** + * To prevent explosion of refresh metadata files, we replace refresh files for the given primary term and generation + * This is achieved by uploading refresh metadata file with the same UUID suffix. + */ + private String metadataFileUniqueSuffix; + + /** + * Keeps track of local segment filename to uploaded filename along with other attributes like checksum. + * This map acts as a cache layer for uploaded segment filenames which helps avoid calling listAll() each time. + * It is important to initialize this map on creation of RemoteSegmentStoreDirectory and update it on each upload and delete. + */ + private Map segmentsUploadedToRemoteStore; + + private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + + public RemoteSegmentStoreDirectory(RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory) throws IOException { + super(remoteDataDirectory); + this.remoteDataDirectory = remoteDataDirectory; + this.remoteMetadataDirectory = remoteMetadataDirectory; + init(); + } + + /** + * Initializes the cache which keeps track of all the segment files uploaded to the remote segment store. + * As this cache is specific to an instance of RemoteSegmentStoreDirectory, it is possible that cache becomes stale + * if another instance of RemoteSegmentStoreDirectory is used to upload/delete segment files. + * It is caller's responsibility to call init() again to ensure that cache is properly updated. + * @throws IOException if there were any failures in reading the metadata file + */ + public void init() throws IOException { + this.metadataFileUniqueSuffix = UUIDs.base64UUID(); + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(readLatestMetadataFile()); + } + + /** + * Read the latest metadata file to get the list of segments uploaded to the remote segment store. + * We upload a metadata file per refresh, but it is not unique per refresh. Refresh metadata file is unique for a given commit. + * The format of refresh metadata filename is: refresh_metadata__PrimaryTerm__Generation__UUID + * Refresh metadata files keep track of active segments for the shard at the time of refresh. + * In order to get the list of segment files uploaded to the remote segment store, we need to read the latest metadata file. 
+ * Each metadata file contains a map where + * Key is - Segment local filename and + * Value is - local filename::uploaded filename::checksum + * @return Map of segment filename to uploaded filename with checksum + * @throws IOException if there were any failures in reading the metadata file + */ + private Map readLatestMetadataFile() throws IOException { + Map segmentMetadataMap = new HashMap<>(); + + Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); + Optional latestMetadataFile = metadataFiles.stream().max(METADATA_FILENAME_COMPARATOR); + + if (latestMetadataFile.isPresent()) { + logger.info("Reading latest Metadata file {}", latestMetadataFile.get()); + segmentMetadataMap = readMetadataFile(latestMetadataFile.get()); + } else { + logger.info("No metadata file found, this can happen for new index with no data uploaded to remote segment store"); + } + + return segmentMetadataMap; + } + + private Map readMetadataFile(String metadataFilename) throws IOException { + try (IndexInput indexInput = remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)) { + Map segmentMetadata = indexInput.readMapOfStrings(); + return segmentMetadata.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> UploadedSegmentMetadata.fromString(entry.getValue()))); + } + } + + /** + * Metadata of a segment that is uploaded to remote segment store. + */ + static class UploadedSegmentMetadata { + private static final String SEPARATOR = "::"; + private final String originalFilename; + private final String uploadedFilename; + private final String checksum; + + UploadedSegmentMetadata(String originalFilename, String uploadedFilename, String checksum) { + this.originalFilename = originalFilename; + this.uploadedFilename = uploadedFilename; + this.checksum = checksum; + } + + @Override + public String toString() { + return String.join(SEPARATOR, originalFilename, uploadedFilename, checksum); + } + + public static UploadedSegmentMetadata fromString(String uploadedFilename) { + String[] values = uploadedFilename.split(SEPARATOR); + return new UploadedSegmentMetadata(values[0], values[1], values[2]); + } + } + + /** + * Contains utility methods that provide various parts of metadata filename along with comparator + * Each metadata filename is of format: PREFIX__PrimaryTerm__Generation__UUID + */ + static class MetadataFilenameUtils { + public static final String SEPARATOR = "__"; + public static final String METADATA_PREFIX = "metadata"; + + /** + * Comparator to sort the metadata filenames. The order of sorting is: Primary Term, Generation, UUID + * Even though UUID sort does not provide any info on recency, it provides a consistent way to sort the filenames. + */ + static class MetadataFilenameComparator implements Comparator { + @Override + public int compare(String first, String second) { + String[] firstTokens = first.split(SEPARATOR); + String[] secondTokens = second.split(SEPARATOR); + if (!firstTokens[0].equals(secondTokens[0])) { + return firstTokens[0].compareTo(secondTokens[0]); + } + long firstPrimaryTerm = getPrimaryTerm(firstTokens); + long secondPrimaryTerm = getPrimaryTerm(secondTokens); + if (firstPrimaryTerm != secondPrimaryTerm) { + return firstPrimaryTerm > secondPrimaryTerm ? 1 : -1; + } else { + long firstGeneration = getGeneration(firstTokens); + long secondGeneration = getGeneration(secondTokens); + if (firstGeneration != secondGeneration) { + return firstGeneration > secondGeneration ? 
1 : -1; + } else { + return getUuid(firstTokens).compareTo(getUuid(secondTokens)); + } + } + } + } + + // Visible for testing + static String getMetadataFilename(long primaryTerm, long generation, String uuid) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + Long.toString(primaryTerm), + Long.toString(generation, Character.MAX_RADIX), + uuid + ); + } + + // Visible for testing + static long getPrimaryTerm(String[] filenameTokens) { + return Long.parseLong(filenameTokens[1]); + } + + // Visible for testing + static long getGeneration(String[] filenameTokens) { + return Long.parseLong(filenameTokens[2], Character.MAX_RADIX); + } + + // Visible for testing + static String getUuid(String[] filenameTokens) { + return filenameTokens[3]; + } + } + + /** + * Returns list of all the segment files uploaded to remote segment store till the last refresh checkpoint. + * Any segment file that is uploaded without corresponding metadata file will not be visible as part of listAll(). + * We chose not to return cache entries for listAll as cache can have entries for stale segments as well. + * Even if we plan to delete stale segments from remote segment store, it will be a periodic operation. + * @return segment filenames stored in remote segment store + * @throws IOException if there were any failures in reading the metadata file + */ + @Override + public String[] listAll() throws IOException { + return readLatestMetadataFile().keySet().toArray(new String[0]); + } + + /** + * Delete segment file from remote segment store. + * @param name the name of an existing segment file in local filesystem. + * @throws IOException if the file exists but could not be deleted. + */ + @Override + public void deleteFile(String name) throws IOException { + String remoteFilename = getExistingRemoteFilename(name); + if (remoteFilename != null) { + remoteDataDirectory.deleteFile(remoteFilename); + segmentsUploadedToRemoteStore.remove(name); + } + } + + /** + * Returns the byte length of a segment file in the remote segment store. + * @param name the name of an existing segment file in local filesystem. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist in the cache or remote segment store + */ + @Override + public long fileLength(String name) throws IOException { + String remoteFilename = getExistingRemoteFilename(name); + if (remoteFilename != null) { + return remoteDataDirectory.fileLength(remoteFilename); + } else { + throw new NoSuchFileException(name); + } + } + + /** + * Creates and returns a new instance of {@link RemoteIndexOutput} which will be used to copy files to the remote + * segment store. + * @param name the name of the file to create. + * @throws IOException in case of I/O error + */ + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + return remoteDataDirectory.createOutput(getNewRemoteSegmentFilename(name), context); + } + + /** + * Opens a stream for reading an existing file and returns {@link RemoteIndexInput} enclosing the stream. + * @param name the name of an existing file. 
+ * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist either in cache or remote segment store + */ + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + String remoteFilename = getExistingRemoteFilename(name); + if (remoteFilename != null) { + return remoteDataDirectory.openInput(remoteFilename, context); + } else { + throw new NoSuchFileException(name); + } + } + + /** + * Copies an existing src file from directory from to a non-existent file dest in this directory. + * Once the segment is uploaded to remote segment store, update the cache accordingly. + */ + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + String remoteFilename = getNewRemoteSegmentFilename(dest); + remoteDataDirectory.copyFrom(from, src, remoteFilename, context); + String checksum = getChecksumOfLocalFile(from, src); + UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum); + segmentsUploadedToRemoteStore.put(src, segmentMetadata); + } + + /** + * Checks if the file exists in the uploadedSegments cache and the checksum matches. + * It is important to match the checksum as the same segment filename can be used for different + * segments due to a concurrency issue. + * @param localFilename filename of segment stored in local filesystem + * @param checksum checksum of the segment file + * @return true if file exists in cache and checksum matches. + */ + public boolean containsFile(String localFilename, String checksum) { + return segmentsUploadedToRemoteStore.containsKey(localFilename) + && segmentsUploadedToRemoteStore.get(localFilename).checksum.equals(checksum); + } + + /** + * Upload metadata file + * @param segmentFiles segment files that are part of the shard at the time of the latest refresh + * @param storeDirectory instance of local directory to temporarily create metadata file before upload + * @param primaryTerm primary term to be used in the name of metadata file + * @param generation commit generation + * @throws IOException in case of I/O error while uploading the metadata file + */ + public void uploadMetadata(Collection segmentFiles, Directory storeDirectory, long primaryTerm, long generation) + throws IOException { + synchronized (this) { + String metadataFilename = MetadataFilenameUtils.getMetadataFilename(primaryTerm, generation, this.metadataFileUniqueSuffix); + IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT); + Map uploadedSegments = new HashMap<>(); + for (String file : segmentFiles) { + if (segmentsUploadedToRemoteStore.containsKey(file)) { + uploadedSegments.put(file, segmentsUploadedToRemoteStore.get(file).toString()); + } else { + throw new NoSuchFileException(file); + } + } + indexOutput.writeMapOfStrings(uploadedSegments); + indexOutput.close(); + storeDirectory.sync(Collections.singleton(metadataFilename)); + remoteMetadataDirectory.copyFrom(storeDirectory, metadataFilename, metadataFilename, IOContext.DEFAULT); + storeDirectory.deleteFile(metadataFilename); + } + } + + private String getChecksumOfLocalFile(Directory directory, String file) throws IOException { + try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { + return Long.toString(CodecUtil.retrieveChecksum(indexInput)); + } + } + + private String getExistingRemoteFilename(String localFilename) { + if (segmentsUploadedToRemoteStore.containsKey(localFilename)) { + return 
segmentsUploadedToRemoteStore.get(localFilename).uploadedFilename; + } else { + return null; + } + } + + private String getNewRemoteSegmentFilename(String localFilename) { + return localFilename + SEGMENT_NAME_UUID_SEPARATOR + UUIDs.base64UUID(); + } + + private String getLocalSegmentFilename(String remoteFilename) { + return remoteFilename.split(SEGMENT_NAME_UUID_SEPARATOR)[0]; + } + + // Visible for testing + Map getSegmentsUploadedToRemoteStore() { + return this.segmentsUploadedToRemoteStore; + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index 2ded77d2cecfd..97575248b4ad3 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -15,11 +15,13 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Set; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -67,6 +69,24 @@ public void testListAllException() throws IOException { assertThrows(IOException.class, () -> remoteDirectory.listAll()); } + public void testListFilesByPrefix() throws IOException { + Map fileNames = Stream.of("abc", "abd", "abe", "abf", "abg") + .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100))); + + when(blobContainer.listBlobsByPrefix("ab")).thenReturn(fileNames); + + Collection actualFileNames = remoteDirectory.listFilesByPrefix("ab"); + Collection expectedFileName = Set.of("abc", "abd", "abe", "abf", "abg"); + assertEquals(expectedFileName, actualFileNames); + } + + public void testListFilesByPrefixException() throws IOException { + when(blobContainer.listBlobsByPrefix("abc")).thenThrow(new IOException("Error reading blob store")); + + assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefix("abc")); + verify(blobContainer).listBlobsByPrefix("abc"); + } + public void testDeleteFile() throws IOException { remoteDirectory.deleteFile("segment_1"); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java new file mode 100644 index 0000000000000..4eabfa74625f2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -0,0 +1,339 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.junit.Before; +import org.opensearch.common.collect.Set; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.startsWith; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteSegmentStoreDirectoryTests extends OpenSearchTestCase { + private RemoteDirectory remoteDataDirectory; + private RemoteDirectory remoteMetadataDirectory; + + private RemoteSegmentStoreDirectory remoteSegmentStoreDirectory; + + @Before + public void setup() throws IOException { + remoteDataDirectory = mock(RemoteDirectory.class); + remoteMetadataDirectory = mock(RemoteDirectory.class); + + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(remoteDataDirectory, remoteMetadataDirectory); + } + + public void testUploadedSegmentMetadataToString() { + RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = new RemoteSegmentStoreDirectory.UploadedSegmentMetadata( + "abc", + "pqr", + "123456" + ); + assertEquals("abc::pqr::123456", metadata.toString()); + } + + public void testUploadedSegmentMetadataFromString() { + RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = RemoteSegmentStoreDirectory.UploadedSegmentMetadata.fromString( + "_0.cfe::_0.cfe__uuidxyz::4567" + ); + assertEquals("_0.cfe::_0.cfe__uuidxyz::4567", metadata.toString()); + } + + public void testGetMetadataFilename() { + // Generation 23 is replaced by n due to radix 32 + assertEquals( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX + "__12__n__uuid1", + RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, "uuid1") + ); + } + + public void testGetPrimaryTermGenerationUuid() { + String[] filenameTokens = "abc__12__n__uuid_xyz".split(RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR); + assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens)); + assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens)); + assertEquals("uuid_xyz", RemoteSegmentStoreDirectory.MetadataFilenameUtils.getUuid(filenameTokens)); + } + + public void testMetadataFilenameComparator() { + List metadataFilenames = new ArrayList<>( + List.of( + "abc__10__20__uuid1", + "abc__12__2__uuid2", + "pqr__1__1__uuid0", + "abc__3__n__uuid3", + "abc__10__8__uuid8", + "abc__3__a__uuid4", + "abc__3__a__uuid5" + ) + ); + metadataFilenames.sort(RemoteSegmentStoreDirectory.METADATA_FILENAME_COMPARATOR); + assertEquals( + List.of( + "abc__3__a__uuid4", + "abc__3__a__uuid5", + "abc__3__n__uuid3", + "abc__10__8__uuid8", + "abc__10__20__uuid1", + "abc__12__2__uuid2", + "pqr__1__1__uuid0" + ), + metadataFilenames + ); + } + + public void testInitException() throws IOException { + 
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow(
+            new IOException("Error")
+        );
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.init());
+    }
+
+    public void testInitNoMetadataFile() throws IOException {
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            List.of()
+        );
+
+        remoteSegmentStoreDirectory.init();
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> actualCache = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertEquals(Set.of(), actualCache.keySet());
+    }
+
+    private Map<String, String> getDummyMetadata(String prefix, int commitGeneration) {
+        Map<String, String> metadata = new HashMap<>();
+        metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__qrt::" + randomIntBetween(1000, 5000));
+        metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__zxd::" + randomIntBetween(1000, 5000));
+        metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__yui::" + randomIntBetween(1000, 5000));
+        metadata.put(
+            "segments_" + commitGeneration,
+            "segments_" + commitGeneration + "::segments_" + commitGeneration + "__exv::" + randomIntBetween(1000, 5000)
+        );
+        return metadata;
+    }
+
+    private void populateMetadata() throws IOException {
+        List<String> metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv");
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            metadataFiles
+        );
+
+        IndexInput indexInput = mock(IndexInput.class);
+        Map<String, String> dummyMetadata = getDummyMetadata("_0", 1);
+        when(indexInput.readMapOfStrings()).thenReturn(dummyMetadata);
+        when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(indexInput);
+    }
+
+    public void testInit() throws IOException {
+        populateMetadata();
+
+        when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn(
+            List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv")
+        );
+
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> actualCache = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertEquals(Set.of("_0.cfe", "_0.cfs", "_0.si", "segments_1"), actualCache.keySet());
+    }
+
+    public void testListAll() throws IOException {
+        populateMetadata();
+
+        assertEquals(Set.of("_0.cfe", "_0.cfs", "_0.si", "segments_1"), Set.of(remoteSegmentStoreDirectory.listAll()));
+    }
+
+    public void testDeleteFile() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertTrue(uploadedSegments.containsKey("_0.si"));
+        assertFalse(uploadedSegments.containsKey("_100.si"));
+
+        remoteSegmentStoreDirectory.deleteFile("_0.si");
+        remoteSegmentStoreDirectory.deleteFile("_100.si");
+
+        verify(remoteDataDirectory).deleteFile(startsWith("_0.si"));
+        verify(remoteDataDirectory, times(0)).deleteFile(startsWith("_100.si"));
+        assertFalse(uploadedSegments.containsKey("_0.si"));
+    }
+
+    public void testDeleteFileException() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(any());
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteFile("_0.si"));
+    }
+
+    public void testFileLength() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertTrue(uploadedSegments.containsKey("_0.si"));
+
+        when(remoteDataDirectory.fileLength(startsWith("_0.si"))).thenReturn(1234L);
+
+        assertEquals(1234L, remoteSegmentStoreDirectory.fileLength("_0.si"));
+    }
+
+    public void testFileLengthNoSuchFile() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteSegmentStoreDirectory
+            .getSegmentsUploadedToRemoteStore();
+
+        assertFalse(uploadedSegments.containsKey("_100.si"));
+        assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.fileLength("_100.si"));
+    }
+
+    public void testCreateOutput() throws IOException {
+        IndexOutput indexOutput = mock(IndexOutput.class);
+        when(remoteDataDirectory.createOutput(startsWith("abc"), eq(IOContext.DEFAULT))).thenReturn(indexOutput);
+
+        assertEquals(indexOutput, remoteSegmentStoreDirectory.createOutput("abc", IOContext.DEFAULT));
+    }
+
+    public void testCreateOutputException() {
+        when(remoteDataDirectory.createOutput(startsWith("abc"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error"));
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.createOutput("abc", IOContext.DEFAULT));
+    }
+
+    public void testOpenInput() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        IndexInput indexInput = mock(IndexInput.class);
+        when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenReturn(indexInput);
+
+        assertEquals(indexInput, remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
+    }
+
+    public void testOpenInputNoSuchFile() {
+        assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
+    }
+
+    public void testOpenInputException() throws IOException {
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error"));
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT));
+    }
+
+    public void testCopyFrom() throws IOException {
+        String filename = "_100.si";
+        populateMetadata();
+        remoteSegmentStoreDirectory.init();
+
+        Directory storeDirectory = LuceneTestCase.newDirectory();
+        IndexOutput indexOutput = storeDirectory.createOutput(filename, IOContext.DEFAULT);
+        indexOutput.writeString("Hello World!");
+        CodecUtil.writeFooter(indexOutput);
+        indexOutput.close();
+        storeDirectory.sync(List.of(filename));
+
+        assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+        remoteSegmentStoreDirectory.copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT);
+        assertTrue(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+
+        storeDirectory.close();
+    }
+
+    public void testCopyFromException() throws IOException {
+        String filename = "_100.si";
+        Directory storeDirectory = LuceneTestCase.newDirectory();
+        assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename));
+        doThrow(new IOException("Error")).when(remoteDataDirectory).copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT);
+
+        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT));
+
assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); + + storeDirectory.close(); + } + + public void testContainsFile() throws IOException { + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + // This is not the correct way to add files but the other way is to open up access to fields in UploadedSegmentMetadata + Map uploadedSegmentMetadataMap = remoteSegmentStoreDirectory + .getSegmentsUploadedToRemoteStore(); + uploadedSegmentMetadataMap.put( + "_100.si", + new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234") + ); + + assertTrue(remoteSegmentStoreDirectory.containsFile("_100.si", "1234")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_100.si", "2345")); + assertFalse(remoteSegmentStoreDirectory.containsFile("_200.si", "1234")); + } + + public void testUploadMetadataEmpty() throws IOException { + Directory storeDirectory = mock(Directory.class); + IndexOutput indexOutput = mock(IndexOutput.class); + when(storeDirectory.createOutput(startsWith("metadata__12__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput); + + Collection segmentFiles = List.of("s1", "s2", "s3"); + assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, storeDirectory, 12L, 24L)); + } + + public void testUploadMetadataNonEmpty() throws IOException { + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + IndexOutput indexOutput = mock(IndexOutput.class); + when(storeDirectory.createOutput(startsWith("metadata__12__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput); + + Collection segmentFiles = List.of("_0.si"); + remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, storeDirectory, 12L, 24L); + + verify(remoteMetadataDirectory).copyFrom( + eq(storeDirectory), + startsWith("metadata__12__o"), + startsWith("metadata__12__o"), + eq(IOContext.DEFAULT) + ); + String metadataString = remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get("_0.si").toString(); + verify(indexOutput).writeMapOfStrings(Map.of("_0.si", metadataString)); + } +} From 0dbbd7292b767fdd7a62280be5807152f7bef0e1 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 15 Aug 2022 12:18:14 -0400 Subject: [PATCH 082/104] Update Gradle to 7.5.1 (#4211) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- gradle/wrapper/gradle-wrapper.properties | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 24c164f0f1e12..58e9a16f424db 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 From 46d7a47304610f6f6093228e7a0b614d3f839bd4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Aug 2022 12:53:25 -0400 Subject: [PATCH 083/104] Bump com.diffplug.spotless from 6.9.0 to 6.9.1 (#4210) Bumps com.diffplug.spotless from 6.9.0 to 6.9.1. 
--- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index e0bb961ce14c2..ce5ea6cdd7e11 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.9.0" apply false + id "com.diffplug.spotless" version "6.9.1" apply false id "org.gradle.test-retry" version "1.4.0" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From 96bfd1fa320c2f7d54e22587774da7e750adfbf8 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Mon, 15 Aug 2022 17:43:10 +0000 Subject: [PATCH 084/104] [Segment Replication] Adding PrimaryMode check before publishing checkpoint and processing a received checkpoint. (#4157) * Adding PrimaryMode check before publishing checkpoint. Signed-off-by: Rishikesh1159 * Applying spotless check Signed-off-by: Rishikesh1159 * Moving segrep specific tests to SegmentReplicationIndexShardTests. Signed-off-by: Rishikesh1159 * Adding logic and tests for rejecting checkpoints if shard is in PrimaryMode. Signed-off-by: Rishikesh1159 * Applying ./gradlew :server:spotlessApply. Signed-off-by: Rishikesh1159 * Applying ./gradlew :server:spotlessApply Signed-off-by: Rishikesh1159 * Changing log level to warn in shouldProcessCheckpoint() of IndexShard.java class. Signed-off-by: Rishikesh1159 * Removing unnecessary lazy logging in shouldProcessCheckpoint(). 
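
In short, the change guards both paths on ReplicationTracker#isPrimaryMode; condensed here from the hunks below (a sketch, not additional code):

    // publish side: CheckpointRefreshListener#afterRefresh
    if (didRefresh && shard.getReplicationTracker().isPrimaryMode()) {
        publisher.publish(shard);
    }

    // receive side: IndexShard#shouldProcessCheckpoint
    if (getReplicationTracker().isPrimaryMode()) {
        logger.warn("Ignoring new replication checkpoint - shard is in primaryMode and cannot receive any checkpoints.");
        return false;
    }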
Signed-off-by: Rishikesh1159 Signed-off-by: Rishikesh1159 --- .../shard/CheckpointRefreshListener.java | 2 +- .../opensearch/index/shard/IndexShard.java | 4 ++ .../index/shard/IndexShardTests.java | 2 +- .../SegmentReplicationIndexShardTests.java | 43 +++++++++++++++++++ .../SegmentReplicationTargetServiceTests.java | 17 ++++++++ 5 files changed, 66 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index ac6754bf6a74a..96d74bea85920 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -40,7 +40,7 @@ public void beforeRefresh() throws IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh) { + if (didRefresh && shard.getReplicationTracker().isPrimaryMode()) { publisher.publish(shard); } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 53324c94c4b08..ac259f9430a3d 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1434,6 +1434,10 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp logger.trace(() -> new ParameterizedMessage("Ignoring new replication checkpoint - shard is not started {}", state())); return false; } + if (getReplicationTracker().isPrimaryMode()) { + logger.warn("Ignoring new replication checkpoint - shard is in primaryMode and cannot receive any checkpoints."); + return false; + } ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); if (localCheckpoint.isAheadOf(requestCheckpoint)) { logger.trace( diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 57c2289c848ef..8c00ab97a46ea 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -3522,7 +3522,7 @@ public void testCheckpointRefreshListenerWithNull() throws IOException { } /** - * creates a new initializing shard. The shard will will be put in its proper path under the + * creates a new initializing shard. The shard will be put in its proper path under the * current node id the shard is assigned to. 
* @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint */ diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 4f2784db93df2..d10f8ced963b7 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -14,11 +14,17 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; + public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { private static final Settings settings = Settings.builder() @@ -80,4 +86,41 @@ public void testIgnoreShardIdle() throws Exception { replica.awaitShardSearchActive(b -> assertFalse("A new RefreshListener should not be registered", b)); } } + + /** + * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. + */ + public void testPublishCheckpointOnPrimaryMode() throws IOException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + IndexShard shard = newStartedShard(true); + CheckpointRefreshListener refreshListener = new CheckpointRefreshListener(shard, mock); + refreshListener.afterRefresh(true); + + // verify checkpoint is published + verify(mock, times(1)).publish(any()); + closeShards(shard); + } + + /** + * here we are starting a new primary shard in PrimaryMode initially and starting relocation handoff. Later we complete relocation handoff then shard is no longer + * in PrimaryMode, and we test if the shard does not publish checkpoint after refresh. 
+ */ + public void testPublishCheckpointAfterRelocationHandOff() throws IOException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + IndexShard shard = newStartedShard(true); + CheckpointRefreshListener refreshListener = new CheckpointRefreshListener(shard, mock); + String id = shard.routingEntry().allocationId().getId(); + + // Starting relocation handoff + shard.getReplicationTracker().startRelocationHandoff(id); + + // Completing relocation handoff + shard.getReplicationTracker().completeRelocationHandoff(); + refreshListener.afterRefresh(true); + + // verify checkpoint is not published + verify(mock, times(0)).publish(any()); + closeShards(shard); + } + } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index b7cfda5dc1d64..004fa7f614ef1 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -208,6 +208,23 @@ public void testNewCheckpoint_validationPassesAndReplicationFails() throws IOExc closeShard(indexShard, false); } + /** + * here we are starting a new shard in PrimaryMode and testing that we don't process a checkpoint on shard when it is in PrimaryMode. + */ + public void testRejectCheckpointOnShardPrimaryMode() throws IOException { + SegmentReplicationTargetService spy = spy(sut); + + // Starting a new shard in PrimaryMode. + IndexShard primaryShard = newStartedShard(true); + IndexShard spyShard = spy(primaryShard); + doNothing().when(spy).startReplication(any(), any(), any()); + spy.onNewCheckpoint(aheadCheckpoint, spyShard); + + // Verify that checkpoint is not processed as shard is in PrimaryMode. 
+ verify(spy, times(0)).startReplication(any(), any(), any()); + closeShards(primaryShard); + } + public void testReplicationOnDone() throws IOException { SegmentReplicationTargetService spy = spy(sut); IndexShard spyShard = spy(indexShard); From ea4cfcc1f1a4788982e8f7bb3175b0903226ca48 Mon Sep 17 00:00:00 2001 From: Yogev Mets Date: Mon, 15 Aug 2022 23:07:55 +0300 Subject: [PATCH 085/104] Set analyzer to regex query string search (#3967) Sets analyzer to regex query string search Signed-off-by: yyyogev --- .../test/search/190_index_prefix_search.yml | 22 +++++++++++++++++++ .../index/search/QueryStringQueryParser.java | 17 ++++++++------ 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 6f276f669f815..ca8e89aa2d5b5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -17,6 +17,12 @@ setup: id: 1 body: { text: some short words with a stupendously long one } + - do: + index: + index: test + id: 2 + body: { text: sentence with UPPERCASE WORDS } + - do: indices.refresh: index: [test] @@ -76,6 +82,22 @@ setup: - match: {hits.max_score: 1} - match: {hits.hits.0._score: 1} +--- +"search with uppercase regex": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + query_string: + default_field: text + query: /UPPERCASE/ + + - match: {hits.total: 1} + - match: {hits.max_score: 1} + - match: {hits.hits.0._score: 1} + --- "search index prefixes with span_multi": - do: diff --git a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java index cdb7464ff250a..6d59e861eb32f 100644 --- a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java @@ -56,7 +56,6 @@ import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.RegExp; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.Fuzziness; @@ -565,7 +564,7 @@ private Query getPrefixQuerySingle(String field, String termStr) throws ParseExc if (currentFieldType == null || currentFieldType.getTextSearchInfo() == TextSearchInfo.NONE) { return newUnmappedFieldQuery(field); } - setAnalyzer(forceAnalyzer == null ? 
queryBuilder.context.getSearchAnalyzer(currentFieldType) : forceAnalyzer); + setAnalyzer(getSearchAnalyzer(currentFieldType)); Query query = null; if (currentFieldType.getTextSearchInfo().isTokenized() == false) { query = currentFieldType.prefixQuery(termStr, getMultiTermRewriteMethod(), context); @@ -741,6 +740,13 @@ private Query getWildcardQuerySingle(String field, String termStr) throws ParseE } } + private Analyzer getSearchAnalyzer(MappedFieldType currentFieldType) { + if (forceAnalyzer == null) { + return queryBuilder.context.getSearchAnalyzer(currentFieldType); + } + return forceAnalyzer; + } + @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { final int maxAllowedRegexLength = context.getIndexSettings().getMaxRegexLength(); @@ -781,11 +787,8 @@ private Query getRegexpQuerySingle(String field, String termStr) throws ParseExc if (currentFieldType == null) { return newUnmappedFieldQuery(field); } - if (forceAnalyzer != null) { - setAnalyzer(forceAnalyzer); - return super.getRegexpQuery(field, termStr); - } - return currentFieldType.regexpQuery(termStr, RegExp.ALL, 0, getDeterminizeWorkLimit(), getMultiTermRewriteMethod(), context); + setAnalyzer(getSearchAnalyzer(currentFieldType)); + return super.getRegexpQuery(field, termStr); } catch (RuntimeException e) { if (lenient) { return newLenientFieldQuery(field, e); From 411321e954cd695ec0c23193cc6b67828fdea8df Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 15 Aug 2022 15:09:46 -0500 Subject: [PATCH 086/104] [Upgrade] to lucene-9.4.0-snapshot-ddf0d0a (#4183) Upgrades to lucene-9.4.0-snapshot-ddf0d0a by refactoring to TermOrdValComparator as a top level class. Signed-off-by: Nicholas Walter Knize --- buildSrc/version.properties | 2 +- .../licenses/lucene-expressions-9.3.0.jar.sha1 | 1 - ...ucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-icu-9.3.0.jar.sha1 | 1 - ...cene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 | 1 - ...analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-nori-9.3.0.jar.sha1 | 1 - ...ene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 | 1 - ...analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 | 1 - ...-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-stempel-9.3.0.jar.sha1 | 1 - ...-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.3.0.jar.sha1 | 1 - ...alysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-analysis-common-9.3.0.jar.sha1 | 1 - ...e-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + .../licenses/lucene-backward-codecs-9.3.0.jar.sha1 | 1 - ...e-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-core-9.3.0.jar.sha1 | 1 - .../lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-grouping-9.3.0.jar.sha1 | 1 - .../lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-highlighter-9.3.0.jar.sha1 | 1 - ...ucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-join-9.3.0.jar.sha1 | 1 - .../lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-memory-9.3.0.jar.sha1 | 1 - .../lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-misc-9.3.0.jar.sha1 | 1 - .../lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 | 
1 + server/licenses/lucene-queries-9.3.0.jar.sha1 | 1 - .../lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-queryparser-9.3.0.jar.sha1 | 1 - ...ucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.3.0.jar.sha1 | 1 - .../lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 | 1 - ...ne-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-spatial3d-9.3.0.jar.sha1 | 1 - .../lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/licenses/lucene-suggest-9.3.0.jar.sha1 | 1 - .../lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 | 1 + server/src/main/java/org/opensearch/Version.java | 2 +- .../java/org/opensearch/common/lucene/Lucene.java | 2 +- .../org/opensearch/index/codec/CodecService.java | 8 ++++---- .../codec/PerFieldMappingPostingFormatCodec.java | 4 ++-- .../BytesRefFieldComparatorSource.java | 11 ++++------- .../java/org/opensearch/index/codec/CodecTests.java | 12 ++++++------ .../index/engine/CompletionStatsCacheTests.java | 4 ++-- 52 files changed, 43 insertions(+), 46 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 
server/licenses/lucene-join-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.3.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8e9911294977a..4af1acfed0ab2 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.3.0 +lucene = 9.4.0-snapshot-ddf0d0a bundled_jdk_vendor = adoptium bundled_jdk = 17.0.4+8 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 deleted file mode 100644 index 2d216277b3a8e..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5583bcd3a24d3aae40b0a3152458021844ac09aa \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..ec6906d730ac1 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +9f23e695b0c864fa9722e4f67d950266ca64d37b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 deleted file mode 100644 index df4ae8d72dd2b..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..83c10845cd35a --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2f6cb0fd7387c6e0db3b86eef7d8677cea3e88a0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 deleted file mode 100644 index 675bf726d2a65..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -87c1357612f2f483174d1a63ea8c6680a1696bac \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..29387f38bc10c --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +6aff23715a2fba88d844ac83c61decce8ed480bd \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 deleted file mode 100644 index 8987f89c913df..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d032dbeb3f4015741336a877dd4b0e62099246c \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..54b451abf5049 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +f82d3eba195134f663865e9de3f511e16fbc7351 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 deleted file mode 100644 index 00d66c733c548..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe6ac8772b545e0abd0c755cd4bd07caad58edb9 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..87474064fbe0f --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2af6e1996e696b1721a2ec7382bac9aa5096eecb \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 deleted file mode 100644 index 0c521b5f5ef6a..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -288726e13b598c341e81aef8b5c9ce53f51889d0 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..6d35832a1a643 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +ec01d7f91f711abd75b539bb66a437db7cf1ca67 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 deleted file mode 100644 index ba98dd7e06f71..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -166d02f7f98f18c6607335030a404fcad8f57cd6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 
b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..f93d1a153cd26 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +7041b3fa92b8687a84c4ce666b5718bbbc315db1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 deleted file mode 100644 index 88ac9a13e8ce3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c0e4177aa87a4be2826a360f656f3559ea3f997 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..77589a361badf --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +0a5ec9a237c2539e3cbabfadff707252e33b3599 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 deleted file mode 100644 index 2e260eb028f4c..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03496708a19a8a55a0dc4f61f8aa2febc6e8977c \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..0805590fd6efd --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +3920c527fd5eee69e09f614391cef4e05c581c7f \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 deleted file mode 100644 index 1dda17ee92fdb..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95ea01ee0d1e543e18e3cf58d8a6a27a587a7239 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..a8c648e4c192a --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +d1dfcd42ea257355d5cbc64ac2f47142a550ae52 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0.jar.sha1 b/server/licenses/lucene-core-9.3.0.jar.sha1 deleted file mode 100644 index fd870008c5bd4..0000000000000 --- a/server/licenses/lucene-core-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a030180999bc3f1a65f23f53b38098ca9daeee79 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..779c9796ceae7 --- /dev/null +++ b/server/licenses/lucene-core-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +ae4757f88e97036b30eb1eac1d21da6dabc85a5e \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0.jar.sha1 b/server/licenses/lucene-grouping-9.3.0.jar.sha1 deleted file mode 100644 index 6f63ca177d3c3..0000000000000 --- 
a/server/licenses/lucene-grouping-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -883071196e53ec93d2a53dcc8211ee30be6c00dc \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..9167482284f9d --- /dev/null +++ b/server/licenses/lucene-grouping-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +75485e3222b096027463378fe3bb2c8d1f529d25 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 deleted file mode 100644 index 78264d8ee3713..0000000000000 --- a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e895c49b9991ea2ec08855c425b9eae44a08764 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..2090b009a57fe --- /dev/null +++ b/server/licenses/lucene-highlighter-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +d2a3d1f326f6d3bd6033b5620dc84f3c20a0412d \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0.jar.sha1 b/server/licenses/lucene-join-9.3.0.jar.sha1 deleted file mode 100644 index 5e641f5f01075..0000000000000 --- a/server/licenses/lucene-join-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04baaae4ce4a35ae919150dd17cd1e63b0da9d24 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..df74fa911f7d2 --- /dev/null +++ b/server/licenses/lucene-join-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2c6f6058c765a955e0544c6050aeee3a5e376e47 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0.jar.sha1 b/server/licenses/lucene-memory-9.3.0.jar.sha1 deleted file mode 100644 index c8e86c7674ede..0000000000000 --- a/server/licenses/lucene-memory-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a2203b332edc1366b9789f5286296e109dbc8c4 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..3e9d65d73d36d --- /dev/null +++ b/server/licenses/lucene-memory-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2a155679022106c7db356da32563580d8de043d7 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0.jar.sha1 b/server/licenses/lucene-misc-9.3.0.jar.sha1 deleted file mode 100644 index 11a459a9f52ba..0000000000000 --- a/server/licenses/lucene-misc-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -61b502c9557247b6803a346c0bab20c9dc89d125 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..f056cfe5b86ef --- /dev/null +++ b/server/licenses/lucene-misc-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +7c7ac2027a12bf02657ec3a421c8039736b98344 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0.jar.sha1 b/server/licenses/lucene-queries-9.3.0.jar.sha1 deleted file mode 100644 index 2b577bd33b46a..0000000000000 --- a/server/licenses/lucene-queries-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d8fe3bce3c05015c5fdb78279f36b9f1a75b98d8 \ No newline at end of file diff --git 
a/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..41ea4a342f949 --- /dev/null +++ b/server/licenses/lucene-queries-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +25978bb82b9f78537f0511f0be64253be19de6fd \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 deleted file mode 100644 index b106860bf9f3e..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78f259a66d48f77a2d2b96a0a858efa08eba72dc \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..e0687571df957 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +e3b6ce41d5bd73fdcc80b5b2a40283c03525aa96 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 deleted file mode 100644 index 82c2c6d85ca4c..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ee318cf8e9a70c2c99e03e157465316a3d4a17a \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..e03c731914757 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +ae8649d2d01a416acdbe7c29f14b47a5594acb85 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 deleted file mode 100644 index 8bbc5359487ff..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9b226b49ae987a4226791f023562187583eb9ad \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..ea8b5cd1ddb1d --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +58049352bb5fc8683c389eb2eb879fb4f16ff9b3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 deleted file mode 100644 index 31132ef0ad6df..0000000000000 --- a/server/licenses/lucene-spatial3d-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -201aa61856ae44fa494504591aed54fd9b75af16 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 b/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..a0c0dbe07af8f --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +2d3a8f802e1bb439d945de81ba6b16d01b24d58a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0.jar.sha1 b/server/licenses/lucene-suggest-9.3.0.jar.sha1 deleted file mode 100644 index 71a263aa163f8..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb5d7243ba67616edbda1ecf421c615dd595752d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 
b/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 new file mode 100644 index 0000000000000..1f332eac16c72 --- /dev/null +++ b/server/licenses/lucene-suggest-9.4.0-snapshot-ddf0d0a.jar.sha1 @@ -0,0 +1 @@ +11cdb21cf08feb19e074b4a101e1550dfd544379 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 57f49f9c54591..27a39db1c50cb 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -96,7 +96,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 74be762bfbcf9..2692a8fa2b914 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -125,7 +125,7 @@ * @opensearch.internal */ public class Lucene { - public static final String LATEST_CODEC = "Lucene92"; + public static final String LATEST_CODEC = "Lucene94"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index ff254a63fadb6..b1e73b3855759 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; -import org.apache.lucene.codecs.lucene92.Lucene92Codec.Mode; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec.Mode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.mapper.MapperService; @@ -62,8 +62,8 @@ public class CodecService { public CodecService(@Nullable MapperService mapperService, Logger logger) { final MapBuilder<String, Codec> codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene92Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene92Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene94Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene94Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index
fd0c66983208a..c101321e47350 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -36,7 +36,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene92Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene94Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index e816b366c3153..d7ce6ae8aba3e 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.comparators.TermOrdValComparator; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.opensearch.common.util.BigArrays; @@ -99,7 +100,7 @@ public FieldComparator<?> newComparator(String fieldname, int numHits, boolean e final boolean sortMissingLast = sortMissingLast(missingValue) ^ reversed; final BytesRef missingBytes = (BytesRef) missingObject(missingValue, reversed); if (indexFieldData instanceof IndexOrdinalsFieldData) { - return new FieldComparator.TermOrdValComparator(numHits, null, sortMissingLast) { + FieldComparator<BytesRef> cmp = new TermOrdValComparator(numHits, indexFieldData.getFieldName(), sortMissingLast, reversed) { @Override protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException { @@ -121,13 +122,9 @@ protected SortedDocValues getSortedDocValues(LeafReaderContext context, String f return new ReplaceMissing(selectedValues, missingBytes); } } - - @Override - public void setScorer(Scorable scorer) { - BytesRefFieldComparatorSource.this.setScorer(scorer); - } - }; + cmp.disableSkipping(); + return cmp; } return new FieldComparator.TermValComparator(numHits, null, sortMissingLast) { diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 0275066f9af1b..0a6338333bffc 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@
-65,21 +65,21 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene92Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene94Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService().codec("default"); - assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene94Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService().codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene94Codec.Mode.BEST_COMPRESSION, codec); } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene92Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene94Codec.Mode expected, Codec actual) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(null); iwc.setCodec(actual); @@ -91,7 +91,7 @@ private void assertStoredFieldsCompressionEquals(Lucene92Codec.Mode expected, Co SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, Lucene92Codec.Mode.valueOf(v)); + assertEquals(expected, Lucene94Codec.Mode.valueOf(v)); ir.close(); dir.close(); } diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index 340811352a203..575997dc2609e 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,7 +32,7 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene92.Lucene92Codec; +import org.apache.lucene.codecs.lucene94.Lucene94Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -70,7 +70,7 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - indexWriterConfig.setCodec(new Lucene92Codec() { + indexWriterConfig.setCodec(new Lucene94Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields From a081f2f13e5df63dcc0897ded2154d7eab3f306a Mon Sep 17 00:00:00 2001 From: Pranav Garg Date: Tue, 16 Aug 2022 04:19:43 +0530 Subject: [PATCH 087/104] Adding documentation about creation of uber-JARs (#3785) * Adding documentation about creation of uber-JARs Signed-off-by: Pranav Garg * Fixing linelint error Signed-off-by: Pranav Garg * Comprehensive changes Signed-off-by: Pranav Garg * Adding PR changes Signed-off-by: Pranav Garg * PR changes Signed-off-by: Pranav Garg Signed-off-by: Pranav Garg 
Co-authored-by: Pranav Garg --- DEVELOPER_GUIDE.md | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index ce84d9658a808..8c2a6b4889122 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -4,7 +4,8 @@ - [Install Prerequisites](#install-prerequisites) - [JDK 11](#jdk-11) - [JDK 14](#jdk-14) - - [Runtime JDK](#runtime-jdk) + - [JDK 17](#jdk-17) + - [Custom Runtime JDK](#custom-runtime-jdk) - [Windows](#windows) - [Docker](#docker) - [Build](#build) @@ -12,6 +13,7 @@ - [Run OpenSearch](#run-opensearch) - [Use an Editor](#use-an-editor) - [IntelliJ IDEA](#intellij-idea) + - [Remote development using JetBrains Gateway](#remote-development-using-jetbrains-gateway) - [Visual Studio Code](#visual-studio-code) - [Eclipse](#eclipse) - [Project Layout](#project-layout) @@ -35,6 +37,7 @@ - [testImplementation](#testimplementation) - [Gradle Plugins](#gradle-plugins) - [Distribution Download Plugin](#distribution-download-plugin) + - [Creating fat-JAR of a Module](#creating-fat-jar-of-a-module) - [Misc](#misc) - [git-secrets](#git-secrets) - [Installation](#installation) @@ -49,7 +52,7 @@ - [Submitting Changes](#submitting-changes) - [Backports](#backports) - [LineLint](#linelint) - - [Lucene Snapshots](#lucene-snapshots) +- [Lucene Snapshots](#lucene-snapshots) # Developer Guide @@ -374,6 +377,42 @@ The Distribution Download plugin downloads the latest version of OpenSearch by d ./gradlew integTest -PcustomDistributionUrl="https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/1127/linux/x64/dist/opensearch-1.2.0-linux-x64.tar.gz" ``` +### Creating fat-JAR of a Module + +A fat-JAR (or uber-JAR) is a JAR that contains the classes of the current project together with the classes of all the libraries the project depends on. + +There might be cases where a developer would like to add some custom logic to the code of a module (or multiple modules) and generate a fat-JAR that can be directly used by the dependency management tool. For example, in [#3665](https://github.com/opensearch-project/OpenSearch/pull/3665) a developer wanted to provide a tentative patch as a fat-JAR to a consumer for changes made in the high level REST client. + +Use the [Gradle Shadow plugin](https://imperceptiblethoughts.com/shadow/). +Add the following to the `build.gradle` file of the module for which you want to create the fat-JAR, e.g. `client/rest-high-level/build.gradle`: + +``` +apply plugin: 'com.github.johnrengelman.shadow' +``` + +Run the `shadowJar` command using: +``` +./gradlew :client:rest-high-level:shadowJar +``` + +This will generate a fat-JAR in the `build/distributions` folder of the module, e.g. `./client/rest-high-level/build/distributions/opensearch-rest-high-level-client-1.4.0-SNAPSHOT.jar`. + +You can further customize the fat-JAR by configuring the plugin; more information about the Shadow plugin can be found [here](https://imperceptiblethoughts.com/shadow/). + +To use the generated JAR, install the JAR locally, e.g. +``` +mvn install:install-file -Dfile=src/main/resources/opensearch-rest-high-level-client-1.4.0-SNAPSHOT.jar -DgroupId=org.opensearch.client -DartifactId=opensearch-rest-high-level-client -Dversion=1.4.0-SNAPSHOT -Dpackaging=jar -DgeneratePom=true +``` + +Reference the installed JAR like any other Maven artifact, e.g.
+ +``` + + org.opensearch.client + opensearch-rest-high-level-client + 1.4.0-SNAPSHOT + +``` ## Misc From 280b938b562fc2088a9afa8a8547f1adb96a84a5 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Mon, 15 Aug 2022 21:34:09 -0700 Subject: [PATCH 088/104] Segment Replication - Remove unnecessary call to markAllocationIdAsInSync. (#4224) This PR Removes an unnecessary call to markAllocationIdAsInSync on the primary shard when replication events complete. Recovery will manage this initial call. Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationSourceHandler.java | 8 -------- 1 file changed, 8 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 46bad7951a2e1..ce764900e433f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -147,14 +147,6 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene transfer.start(); sendFileStep.whenComplete(r -> { - final String targetAllocationId = request.getTargetAllocationId(); - RunUnderPrimaryPermit.run( - () -> shard.markAllocationIdAsInSync(targetAllocationId, request.getCheckpoint().getSeqNo()), - shard.shardId() + " marking " + targetAllocationId + " as in sync", - shard, - cancellableThreads, - logger - ); try { future.onResponse(new GetSegmentFilesResponse(List.of(storeFileMetadata))); } finally { From 1e47b9528f9cb9b37ab84438467485c67ffe12ad Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 16 Aug 2022 09:01:42 -0500 Subject: [PATCH 089/104] [MUTE] search with uppercase regex in bwc testing (#4226) Prevents "search with uppercase regex" in 190_index_prefix_search.yml from running in bwc testing until feature is backported. 
Signed-off-by: Nicholas Walter Knize --- .../rest-api-spec/test/search/190_index_prefix_search.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index ca8e89aa2d5b5..25d3dd160e031 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -84,6 +84,9 @@ setup: --- "search with uppercase regex": + - skip: + version: " - 2.99.99" + reason: uppercase regex not supported before 3.0.0 - do: search: rest_total_hits_as_int: true From 551f7c3481040a3b4b051a27eec973c90fb5def6 Mon Sep 17 00:00:00 2001 From: Bukhtawar Khan Date: Tue, 16 Aug 2022 22:18:56 +0530 Subject: [PATCH 090/104] Introduce TranslogFactory for Local/Remote Translog support (#4172) * Introduce TranslogFactory for Local/Remote Translog support Signed-off-by: Bukhtawar Khan --- .../opensearch/index/shard/IndexShardIT.java | 2 + .../org/opensearch/index/IndexService.java | 3 ++ .../opensearch/index/engine/EngineConfig.java | 19 ++++++++- .../index/engine/EngineConfigFactory.java | 7 +++- .../index/engine/InternalEngine.java | 3 +- .../index/engine/NRTReplicationEngine.java | 3 +- .../opensearch/index/engine/NoOpEngine.java | 17 ++++---- .../index/engine/ReadOnlyEngine.java | 17 ++++---- .../opensearch/index/shard/IndexShard.java | 7 +++- .../translog/InternalTranslogFactory.java | 41 +++++++++++++++++++ .../translog/InternalTranslogManager.java | 10 +++-- .../index/translog/TranslogFactory.java | 32 +++++++++++++++ .../translog/WriteOnlyTranslogManager.java | 6 ++- .../engine/EngineConfigFactoryTests.java | 7 +++- .../index/engine/EngineConfigTests.java | 4 +- .../InternalTranslogManagerTests.java | 21 ++++++---- .../index/shard/IndexShardTestCase.java | 2 + 17 files changed, 162 insertions(+), 39 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java create mode 100644 server/src/main/java/org/opensearch/index/translog/TranslogFactory.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 2bf73b34247b3..3d8da7eac7690 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -78,6 +78,7 @@ import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; @@ -675,6 +676,7 @@ public static final IndexShard newIndexShard( () -> {}, RetentionLeaseSyncer.EMPTY, cbs, + new InternalTranslogFactory(), SegmentReplicationCheckpointPublisher.EMPTY, null ); diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 210df9d342cb7..38065bf5aed20 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -88,6 +88,7 @@ import org.opensearch.index.shard.ShardPath; 
import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.Translog; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; @@ -547,6 +548,8 @@ public synchronized IndexShard createShard( () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, circuitBreakerService, + // TODO Replace with remote translog factory in the follow up PR + this.indexSettings.isRemoteTranslogStoreEnabled() ? null : new InternalTranslogFactory(), this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null, remoteStore ); diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 4ae6646ed14f0..ba30103f70269 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -51,8 +51,10 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; +import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.threadpool.ThreadPool; @@ -150,6 +152,8 @@ public Supplier<RetentionLeases> retentionLeasesSupplier() { private final TranslogConfig translogConfig; + private final TranslogFactory translogFactory; + public EngineConfig( ShardId shardId, ThreadPool threadPool, @@ -253,7 +257,8 @@ public EngineConfig( retentionLeasesSupplier, primaryTermSupplier, tombstoneDocSupplier, - false + false, + new InternalTranslogFactory() ); } @@ -284,7 +289,8 @@ public EngineConfig( Supplier<RetentionLeases> retentionLeasesSupplier, LongSupplier primaryTermSupplier, TombstoneDocSupplier tombstoneDocSupplier, - boolean isReadOnlyReplica + boolean isReadOnlyReplica, + TranslogFactory translogFactory ) { if (isReadOnlyReplica && indexSettings.isSegRepEnabled() == false) { throw new IllegalArgumentException("Shard can only be wired as a read only replica with Segment Replication enabled"); @@ -328,6 +334,7 @@ public EngineConfig( this.primaryTermSupplier = primaryTermSupplier; this.tombstoneDocSupplier = tombstoneDocSupplier; this.isReadOnlyReplica = isReadOnlyReplica; + this.translogFactory = translogFactory; } /** @@ -532,6 +539,14 @@ public boolean isReadOnlyReplica() { return indexSettings.isSegRepEnabled() && isReadOnlyReplica; } + /** + * Returns the underlying translog factory + * @return the translog factory + */ + public TranslogFactory getTranslogFactory() { + return translogFactory; + } + /** * A supplier supplies tombstone documents which will be used in soft-update methods. * The returned document consists only _uid, _seqno, _term and _version fields; other metadata fields are excluded.
diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index c8aec3570f8b5..f0db086e47816 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -28,6 +28,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; +import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.PluginsService; @@ -147,7 +148,8 @@ public EngineConfig newEngineConfig( Supplier retentionLeasesSupplier, LongSupplier primaryTermSupplier, EngineConfig.TombstoneDocSupplier tombstoneDocSupplier, - boolean isReadOnlyReplica + boolean isReadOnlyReplica, + TranslogFactory translogFactory ) { CodecService codecServiceToUse = codecService; if (codecService == null && this.codecServiceFactory != null) { @@ -178,7 +180,8 @@ public EngineConfig newEngineConfig( retentionLeasesSupplier, primaryTermSupplier, tombstoneDocSupplier, - isReadOnlyReplica + isReadOnlyReplica, + translogFactory ); } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 7d90c2ad653be..16599141b1345 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -289,7 +289,8 @@ public void onFailure(String reason, Exception ex) { () -> getLocalCheckpointTracker(), translogUUID, new CompositeTranslogEventListener(Arrays.asList(internalTranslogEventListener, translogEventListener), shardId), - this::ensureOpen + this::ensureOpen, + engineConfig.getTranslogFactory() ); this.translogManager = translogManagerRef; this.softDeletesPolicy = newSoftDeletesPolicy(); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 1f9306cf51e1e..a481203ba3dea 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -101,7 +101,8 @@ public void onAfterTranslogSync() { } } }, - this + this, + engineConfig.getTranslogFactory() ); this.translogManager = translogManagerRef; } catch (IOException e) { diff --git a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java index 4bc37084675ea..f6c5bf7640a73 100644 --- a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java @@ -195,14 +195,15 @@ public void trimUnreferencedTranslogFiles() throws TranslogException { final TranslogDeletionPolicy translogDeletionPolicy = new DefaultTranslogDeletionPolicy(-1, -1, 0); translogDeletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); try ( - Translog translog = new Translog( - translogConfig, - translogUuid, - translogDeletionPolicy, - engineConfig.getGlobalCheckpointSupplier(), - engineConfig.getPrimaryTermSupplier(), - seqNo -> {} - ) + Translog translog = engineConfig.getTranslogFactory() + .newTranslog( + translogConfig, + translogUuid, + 
translogDeletionPolicy, + engineConfig.getGlobalCheckpointSupplier(), + engineConfig.getPrimaryTermSupplier(), + seqNo -> {} + ) ) { translog.trimUnreferencedReaders(); // refresh the translog stats diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index cebe262fee5d1..f426768119c1d 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -258,14 +258,15 @@ private static TranslogStats translogStats(final EngineConfig config, final Segm final long localCheckpoint = Long.parseLong(infos.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); translogDeletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); try ( - Translog translog = new Translog( - translogConfig, - translogUuid, - translogDeletionPolicy, - config.getGlobalCheckpointSupplier(), - config.getPrimaryTermSupplier(), - seqNo -> {} - ) + Translog translog = config.getTranslogFactory() + .newTranslog( + translogConfig, + translogUuid, + translogDeletionPolicy, + config.getGlobalCheckpointSupplier(), + config.getPrimaryTermSupplier(), + seqNo -> {} + ) ) { return translog.stats(); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index ac259f9430a3d..8970e0734f184 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -151,6 +151,7 @@ import org.opensearch.index.store.StoreStats; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; +import org.opensearch.index.translog.TranslogFactory; import org.opensearch.index.translog.TranslogRecoveryRunner; import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.warmer.ShardIndexWarmerService; @@ -308,6 +309,7 @@ Runnable getGlobalCheckpointSyncer() { private final ReferenceManager.RefreshListener checkpointRefreshListener; private final Store remoteStore; + private final TranslogFactory translogFactory; public IndexShard( final ShardRouting shardRouting, @@ -330,6 +332,7 @@ public IndexShard( final Runnable globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService, + final TranslogFactory translogFactory, @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore ) throws IOException { @@ -420,6 +423,7 @@ public boolean shouldCache(Query query) { this.checkpointRefreshListener = null; } this.remoteStore = remoteStore; + this.translogFactory = translogFactory; } public ThreadPool getThreadPool() { @@ -3254,7 +3258,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro replicationTracker::getRetentionLeases, () -> getOperationPrimaryTerm(), tombstoneDocSupplier(), - indexSettings.isSegRepEnabled() && shardRouting.primary() == false + indexSettings.isSegRepEnabled() && shardRouting.primary() == false, + translogFactory ); } diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java new file mode 100644 index 0000000000000..566eda4fe4a6e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java @@ -0,0 +1,41 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog; + +import java.io.IOException; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * Translog Factory for the local on-disk {@link Translog} + * + * @opensearch.internal + */ +public class InternalTranslogFactory implements TranslogFactory { + + @Override + public Translog newTranslog( + TranslogConfig translogConfig, + String translogUUID, + TranslogDeletionPolicy translogDeletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer + ) throws IOException { + + return new Translog( + translogConfig, + translogUUID, + translogDeletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer + ); + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index ac82cf246cc55..fd52e02132006 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -54,7 +54,8 @@ public InternalTranslogManager( Supplier localCheckpointTrackerSupplier, String translogUUID, TranslogEventListener translogEventListener, - LifecycleAware engineLifeCycleAware + LifecycleAware engineLifeCycleAware, + TranslogFactory translogFactory ) throws IOException { this.shardId = shardId; this.readLock = readLock; @@ -67,7 +68,7 @@ public InternalTranslogManager( if (tracker != null) { tracker.markSeqNoAsPersisted(seqNo); } - }, translogUUID); + }, translogUUID, translogFactory); assert translog.getGeneration() != null; this.translog = translog; assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; @@ -333,10 +334,11 @@ protected Translog openTranslog( TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier, LongConsumer persistedSequenceNumberConsumer, - String translogUUID + String translogUUID, + TranslogFactory translogFactory ) throws IOException { - return new Translog( + return translogFactory.newTranslog( translogConfig, translogUUID, translogDeletionPolicy, diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java new file mode 100644 index 0000000000000..5500bda99808d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import java.io.IOException; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * Translog Factory to enable creation of various local on-disk + * and remote store flavors of {@link Translog} + * + * @opensearch.internal + */ +@FunctionalInterface +public interface TranslogFactory { + + Translog newTranslog( + final TranslogConfig config, + final String translogUUID, + final TranslogDeletionPolicy deletionPolicy, + final LongSupplier globalCheckpointSupplier, + final LongSupplier primaryTermSupplier, + final LongConsumer persistedSequenceNumberConsumer + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java index 09f5f38a9f6a9..96a2dd05851c0 100644 --- a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java @@ -35,7 +35,8 @@ public WriteOnlyTranslogManager( Supplier localCheckpointTrackerSupplier, String translogUUID, TranslogEventListener translogEventListener, - LifecycleAware engineLifecycleAware + LifecycleAware engineLifecycleAware, + TranslogFactory translogFactory ) throws IOException { super( translogConfig, @@ -47,7 +48,8 @@ public WriteOnlyTranslogManager( localCheckpointTrackerSupplier, translogUUID, translogEventListener, - engineLifecycleAware + engineLifecycleAware, + translogFactory ); } diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java index 7ddd92ea7b36e..269d89352fb18 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java @@ -16,6 +16,7 @@ import org.opensearch.index.codec.CodecService; import org.opensearch.index.codec.CodecServiceFactory; import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogReader; @@ -66,7 +67,8 @@ public void testCreateEngineConfigFromFactory() { () -> new RetentionLeases(0, 0, Collections.emptyList()), null, null, - false + false, + new InternalTranslogFactory() ); assertNotNull(config.getCodec()); @@ -143,7 +145,8 @@ public void testCreateCodecServiceFromFactory() { () -> new RetentionLeases(0, 0, Collections.emptyList()), null, null, - false + false, + new InternalTranslogFactory() ); assertNotNull(config.getCodec()); } diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java index 1c6d06e9bcc08..1754d6082b86d 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.IndexSettingsModule; import 
org.opensearch.test.OpenSearchTestCase; @@ -102,7 +103,8 @@ private EngineConfig createReadOnlyEngine(IndexSettings indexSettings) { () -> RetentionLeases.EMPTY, null, null, - true + true, + new InternalTranslogFactory() ); } } diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java index 5171f0dfa1d18..234abfba66622 100644 --- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java @@ -47,7 +47,8 @@ public void testRecoveryFromTranslog() throws IOException { () -> tracker, translogUUID, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER, - () -> {} + () -> {}, + new InternalTranslogFactory() ); final int docs = randomIntBetween(1, 100); for (int i = 0; i < docs; i++) { @@ -85,7 +86,8 @@ public void onBeginTranslogRecovery() { beginTranslogRecoveryInvoked.set(true); } }, - () -> {} + () -> {}, + new InternalTranslogFactory() ); AtomicInteger opsRecovered = new AtomicInteger(); int opsRecoveredFromTranslog = translogManager.recoverFromTranslog((snapshot) -> { @@ -122,7 +124,8 @@ public void testTranslogRollsGeneration() throws IOException { () -> tracker, translogUUID, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER, - () -> {} + () -> {}, + new InternalTranslogFactory() ); final int docs = randomIntBetween(1, 100); for (int i = 0; i < docs; i++) { @@ -150,7 +153,8 @@ public void testTranslogRollsGeneration() throws IOException { () -> new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED), translogUUID, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER, - () -> {} + () -> {}, + new InternalTranslogFactory() ); AtomicInteger opsRecovered = new AtomicInteger(); int opsRecoveredFromTranslog = translogManager.recoverFromTranslog((snapshot) -> { @@ -183,7 +187,8 @@ public void testTrimOperationsFromTranslog() throws IOException { () -> tracker, translogUUID, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER, - () -> {} + () -> {}, + new InternalTranslogFactory() ); final int docs = randomIntBetween(1, 100); for (int i = 0; i < docs; i++) { @@ -213,7 +218,8 @@ public void testTrimOperationsFromTranslog() throws IOException { () -> new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED), translogUUID, TranslogEventListener.NOOP_TRANSLOG_EVENT_LISTENER, - () -> {} + () -> {}, + new InternalTranslogFactory() ); AtomicInteger opsRecovered = new AtomicInteger(); int opsRecoveredFromTranslog = translogManager.recoverFromTranslog((snapshot) -> { @@ -260,7 +266,8 @@ public void onAfterTranslogSync() { } } }, - () -> {} + () -> {}, + new InternalTranslogFactory() ); translogManagerAtomicReference.set(translogManager); Engine.Index index = indexForDoc(doc); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 7dedc572ff19b..f446538acccbb 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -90,6 +90,7 @@ import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.Translog; import 
org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; @@ -555,6 +556,7 @@ protected IndexShard newShard( globalCheckpointSyncer, retentionLeaseSyncer, breakerService, + new InternalTranslogFactory(), checkpointPublisher, remoteStore ); From e17af2a19a6fb0258dca5a075cbe273b4367056b Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 18:06:42 -0400 Subject: [PATCH 091/104] Added bwc version 2.2.1 (#4194) Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 67976d58188e2..1ba3ee562317a 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -48,4 +48,5 @@ BWC_VERSION: - "2.1.0" - "2.1.1" - "2.2.0" + - "2.2.1" - "2.3.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 27a39db1c50cb..ba512d3fbcdd9 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -95,6 +95,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_4_0); public static final Version CURRENT = V_3_0_0; From 237f1a5344fb96e888852e089be349867b3c2340 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 16 Aug 2022 17:28:25 -0700 Subject: [PATCH 092/104] [Segment Replication] Wait for documents to replicate to replica shards (#4236) * [Segment Replication] Add thread sleep to account for replica lag in delete operations test Signed-off-by: Suraj Singh * Address review comments, assertBusy on doc count rather than sleep Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- .../replication/SegmentReplicationIT.java | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 96ac703b9837e..9518d5e802a8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -38,6 +38,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; @@ -293,9 +294,22 @@ public void testDeleteOperations() throws Exception { refresh(INDEX_NAME); waitForReplicaUpdate(); - - assertHitCount(client(nodeA).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount - 1); - 
assertHitCount(client(nodeB).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount - 1); + assertBusy(() -> { + final long nodeA_Count = client(nodeA).prepareSearch(INDEX_NAME) + .setSize(0) + .setPreference("_only_local") + .get() + .getHits() + .getTotalHits().value; + assertEquals(expectedHitCount - 1, nodeA_Count); + final long nodeB_Count = client(nodeB).prepareSearch(INDEX_NAME) + .setSize(0) + .setPreference("_only_local") + .get() + .getHits() + .getTotalHits().value; + assertEquals(expectedHitCount - 1, nodeB_Count); + }, 5, TimeUnit.SECONDS); } } From 3a97d4cad65fa19126340aabf7e8bd2b1ce2b822 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 16 Aug 2022 18:39:51 -0700 Subject: [PATCH 093/104] Segment Replication - Add additional unit tests for update & delete ops. (#4237) * Segment Replication - Add additional unit tests for update & delete operations. Signed-off-by: Marc Handalian * Fix spotless. Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- .../SegmentReplicationIndexShardTests.java | 54 +++++++++++++++++++ ...enSearchIndexLevelReplicationTestCase.java | 7 ++- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index d10f8ced963b7..4f6d86c13e12c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,17 +8,23 @@ package org.opensearch.index.shard; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.util.List; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.verify; @@ -52,6 +58,54 @@ public void testReplicationCheckpointNotNullForSegReb() throws IOException { closeShards(indexShard); } + public void testSegmentReplication_Index_Update_Delete() throws Exception { + String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; + try (ReplicationGroup shards = createGroup(2, settings, mappings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + + final int numDocs = randomIntBetween(100, 200); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + } + + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + + shards.assertAllEqual(numDocs); + + for (int i = 0; i < numDocs; i++) { + // randomly 
update docs. + if (randomBoolean()) { + shards.index( + new IndexRequest(index.getName()).id(String.valueOf(i)).source("{ \"foo\" : \"baz\" }", XContentType.JSON) + ); + } + } + + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + final List docs = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docs); + } + for (int i = 0; i < numDocs; i++) { + // randomly delete. + if (randomBoolean()) { + shards.delete(new DeleteRequest(index.getName()).id(String.valueOf(i))); + } + } + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + final List docsAfterDelete = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docsAfterDelete); + } + } + } + public void testIgnoreShardIdle() throws Exception { try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { shards.startAll(); diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 249ffcfd0bf6e..b3f062aef4fbe 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -141,7 +141,12 @@ protected ReplicationGroup createGroup(int replicas, Settings settings) throws I } protected ReplicationGroup createGroup(int replicas, Settings settings, EngineFactory engineFactory) throws IOException { - IndexMetadata metadata = buildIndexMetadata(replicas, settings, indexMapping); + return createGroup(replicas, settings, indexMapping, engineFactory); + } + + protected ReplicationGroup createGroup(int replicas, Settings settings, String mappings, EngineFactory engineFactory) + throws IOException { + IndexMetadata metadata = buildIndexMetadata(replicas, settings, mappings); return new ReplicationGroup(metadata) { @Override protected EngineFactory getEngineFactory(ShardRouting routing) { From f65e02d1b910bd0a1990868bfa5d12ba829bbbd5 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 17 Aug 2022 10:32:11 -0700 Subject: [PATCH 094/104] Support shard promotion with Segment Replication. (#4135) * Support shard promotion with Segment Replication. This change adds basic failover support with segment replication. Once selected, a replica will commit and reopen a writeable engine. Signed-off-by: Marc Handalian * Add check to ensure a closed shard does not publish checkpoints. Signed-off-by: Marc Handalian * Clean up in SegmentReplicationIT. Signed-off-by: Marc Handalian * PR feedback. Signed-off-by: Marc Handalian * Fix merge conflict. 
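In outline, the failover path wired up below looks like this (a condensed sketch stitched together from the hunks in this patch, not verbatim code):

    // IndexShard#updateShardState, when cluster state promotes this shard:
    if (indexSettings.isSegRepEnabled()) {
        // the replica engine was read-only; convert it before restoring history from xlog
        promoteNRTReplicaToPrimary();
    }
    replicationTracker.activatePrimaryMode(getLocalCheckpoint());

    // promoteNRTReplicaToPrimary() performs two steps:
    // 1. NRTReplicationEngine#commitSegmentInfos -> Store#commitSegmentInfos: commit the
    //    latest refreshed SegmentInfos with LOCAL_CHECKPOINT_KEY and MAX_SEQ_NO stamped
    //    into the commit user data, so refreshed-but-uncommitted docs survive the engine
    //    swap without reindexing.
    // 2. resetEngineToGlobalCheckpoint(): reopen a writeable InternalEngine from that
    //    commit and replay any remaining translog operations (ack'd writes that were
    //    never copied to this replica as segments).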
Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationIT.java | 148 +++++++++++++++++- .../org/opensearch/index/IndexService.java | 2 +- .../index/engine/NRTReplicationEngine.java | 17 ++ .../shard/CheckpointRefreshListener.java | 2 +- .../opensearch/index/shard/IndexShard.java | 40 +++-- .../org/opensearch/index/store/Store.java | 42 +++++ .../engine/NRTReplicationEngineTests.java | 52 ++++++ .../SegmentReplicationIndexShardTests.java | 80 +++++++++- 8 files changed, 363 insertions(+), 20 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 9518d5e802a8e..8566cc5556861 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -19,7 +19,10 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; @@ -30,6 +33,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -73,6 +77,109 @@ protected boolean addMockInternalEngine() { return false; } + public void testPrimaryStopped_ReplicaPromoted() throws Exception { + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForReplicaUpdate(); + assertHitCount(client(primary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 1); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 1); + + // index another doc but don't refresh, we will ensure this is searchable once replica is promoted. + client().prepareIndex(INDEX_NAME).setId("2").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // stop the primary node - we only have one shard on here. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + + final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica); + assertNotNull(replicaShardRouting); + assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary()); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + + // assert we can index into the new primary. 
+ client().prepareIndex(INDEX_NAME).setId("3").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); + + // start another node, index another doc and replicate. + String nodeC = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + client().prepareIndex(INDEX_NAME).setId("4").setSource("baz", "baz").get(); + refresh(INDEX_NAME); + waitForReplicaUpdate(); + assertHitCount(client(nodeC).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 4); + assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 4); + assertSegmentStats(REPLICA_COUNT); + } + + public void testRestartPrimary() throws Exception { + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), primary); + + final int initialDocCount = 1; + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForReplicaUpdate(); + assertDocCounts(initialDocCount, replica, primary); + + internalCluster().restartNode(primary); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), replica); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertDocCounts(initialDocCount, replica, primary); + assertSegmentStats(REPLICA_COUNT); + } + + public void testCancelPrimaryAllocation() throws Exception { + // this test cancels allocation on the primary - promoting the new replica and recreating the former primary as a replica. 
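+ // (the boolean argument passed to CancelAllocationCommand below is allowPrimary;
+ // passing true permits cancelling the active primary, which triggers the promotion)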
+ final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + final int initialDocCount = 1; + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + + waitForReplicaUpdate(); + assertDocCounts(initialDocCount, replica, primary); + + final IndexShard indexShard = getIndexShard(primary); + client().admin() + .cluster() + .prepareReroute() + .add(new CancelAllocationCommand(INDEX_NAME, indexShard.shardId().id(), primary, true)) + .execute() + .actionGet(); + ensureGreen(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), replica); + + flushAndRefresh(INDEX_NAME); + waitForReplicaUpdate(); + + assertDocCounts(initialDocCount, replica, primary); + assertSegmentStats(REPLICA_COUNT); + } + public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { final String nodeA = internalCluster().startNode(); final String nodeB = internalCluster().startNode(); @@ -240,9 +347,8 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); - final Index index = resolveIndex(INDEX_NAME); - IndexShard primaryShard = getIndexShard(index, primaryNode); - IndexShard replicaShard = getIndexShard(index, replicaNode); + IndexShard primaryShard = getIndexShard(primaryNode); + IndexShard replicaShard = getIndexShard(replicaNode); assertEquals( primaryShard.translogStats().estimatedNumberOfOperations(), replicaShard.translogStats().estimatedNumberOfOperations() @@ -351,8 +457,7 @@ private void assertSegmentStats(int numberOfReplicas) throws IOException { final ShardRouting replicaShardRouting = shardSegment.getShardRouting(); ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); final DiscoveryNode replicaNode = state.nodes().resolveNode(replicaShardRouting.currentNodeId()); - final Index index = resolveIndex(INDEX_NAME); - IndexShard indexShard = getIndexShard(index, replicaNode.getName()); + IndexShard indexShard = getIndexShard(replicaNode.getName()); final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(indexShard.store().directory()); // calls to readCommit will fail if a valid commit point and all its segments are not in the store. 
SegmentInfos.readCommit(indexShard.store().directory(), lastCommitSegmentsFileName); @@ -392,7 +497,8 @@ private void waitForReplicaUpdate() throws Exception { }); } - private IndexShard getIndexShard(Index index, String node) { + private IndexShard getIndexShard(String node) { + final Index index = resolveIndex(INDEX_NAME); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); IndexService indexService = indicesService.indexServiceSafe(index); final Optional<Integer> shardId = indexService.shardIds().stream().findFirst(); @@ -409,7 +515,8 @@ private List<ShardSegments> getShardSegments(IndicesSegmentResponse indicesSeg } private Map<String, Segment> getLatestSegments(ShardSegments segments) { - final Long latestPrimaryGen = segments.getSegments().stream().map(Segment::getGeneration).max(Long::compare).get(); + final Optional<Long> generation = segments.getSegments().stream().map(Segment::getGeneration).max(Long::compare); + final Long latestPrimaryGen = generation.get(); return segments.getSegments() .stream() .filter(s -> s.getGeneration() == latestPrimaryGen) @@ -419,4 +526,31 @@ private Map<String, Segment> getLatestSegments(ShardSegments segments) { private Map<Boolean, List<ShardSegments>> segmentsByShardType(ShardSegments[] replicationGroupSegments) { return Arrays.stream(replicationGroupSegments).collect(Collectors.groupingBy(s -> s.getShardRouting().primary())); } + + @Nullable + private ShardRouting getShardRoutingForNodeName(String nodeName) { + final ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); + for (IndexShardRoutingTable shardRoutingTable : state.routingTable().index(INDEX_NAME)) { + for (ShardRouting shardRouting : shardRoutingTable.activeShards()) { + final String nodeId = shardRouting.currentNodeId(); + final DiscoveryNode discoveryNode = state.nodes().resolveNode(nodeId); + if (discoveryNode.getName().equals(nodeName)) { + return shardRouting; + } + } + } + return null; + } + + private void assertDocCounts(int expectedDocCount, String... nodeNames) { + for (String node : nodeNames) { + assertHitCount(client(node).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedDocCount); + } + } + + private DiscoveryNode getNodeContainingPrimaryShard() { + final ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); + final ShardRouting primaryShard = state.routingTable().index(INDEX_NAME).shard(0).primaryShard(); + return state.nodes().resolveNode(primaryShard.currentNodeId()); + } } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java index 38065bf5aed20..e1427df1c34ab 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -550,7 +550,7 @@ public synchronized IndexShard createShard( circuitBreakerService, // TODO Replace with remote translog factory in the follow up PR this.indexSettings.isRemoteTranslogStoreEnabled() ? null : new InternalTranslogFactory(), - this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null, + this.indexSettings.isSegRepEnabled() ?
checkpointPublisher : null, remoteStore ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index a481203ba3dea..6f5b7030ed65f 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -129,6 +129,23 @@ public synchronized void updateSegments(final SegmentInfos infos, long seqNo) th localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); } + /** + * Persist the latest live SegmentInfos. + * + * This method creates a commit point from the latest SegmentInfos. It is intended to be used when this shard is about to be promoted as the new primary. + * + * TODO: If this method is invoked while the engine is currently updating segments on its reader, wait for that update to complete so the updated segments are used. + * + * + * @throws IOException - When there is an IO error committing the SegmentInfos. + */ + public void commitSegmentInfos() throws IOException { + // TODO: This method should wait for replication events to finalize. + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + store.commitSegmentInfos(latestSegmentInfos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); + translogManager.syncTranslog(); + } + @Override public String getHistoryUUID() { return loadHistoryUUID(lastCommittedSegmentInfos.userData); diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index 96d74bea85920..fb046e2310d93 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -40,7 +40,7 @@ public void beforeRefresh() throws IOException { @Override public void afterRefresh(boolean didRefresh) throws IOException { - if (didRefresh && shard.getReplicationTracker().isPrimaryMode()) { + if (didRefresh && shard.state() != IndexShardState.CLOSED && shard.getReplicationTracker().isPrimaryMode()) { publisher.publish(shard); } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 8970e0734f184..67a8e691fda0d 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -242,6 +242,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final GlobalCheckpointListeners globalCheckpointListeners; private final PendingReplicationActions pendingReplicationActions; private final ReplicationTracker replicationTracker; + private final SegmentReplicationCheckpointPublisher checkpointPublisher; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; @@ -306,8 +307,6 @@ Runnable getGlobalCheckpointSyncer() { private final AtomicReference pendingRefreshLocation = new AtomicReference<>(); private final RefreshPendingLocationListener refreshPendingLocationListener; private volatile boolean useRetentionLeasesInPeerRecovery; - private final ReferenceManager.RefreshListener checkpointRefreshListener; - private final Store remoteStore; private final TranslogFactory 
translogFactory; @@ -417,11 +416,7 @@ public boolean shouldCache(Query query) { persistMetadata(path, indexSettings, shardRouting, null, logger); this.useRetentionLeasesInPeerRecovery = replicationTracker.hasAllPeerRecoveryRetentionLeases(); this.refreshPendingLocationListener = new RefreshPendingLocationListener(); - if (checkpointPublisher != null) { - this.checkpointRefreshListener = new CheckpointRefreshListener(this, checkpointPublisher); - } else { - this.checkpointRefreshListener = null; - } + this.checkpointPublisher = checkpointPublisher; this.remoteStore = remoteStore; this.translogFactory = translogFactory; } @@ -627,6 +622,11 @@ public void updateShardState( + newRouting; assert getOperationPrimaryTerm() == newPrimaryTerm; try { + if (indexSettings.isSegRepEnabled()) { + // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. + assert newRouting.primary() && currentRouting.primary() == false; + promoteNRTReplicaToPrimary(); + } replicationTracker.activatePrimaryMode(getLocalCheckpoint()); ensurePeerRecoveryRetentionLeasesExist(); /* @@ -3231,8 +3231,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro Directory remoteDirectory = ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); internalRefreshListener.add(new RemoteStoreRefreshListener(store.directory(), remoteDirectory)); } - if (this.checkpointRefreshListener != null) { - internalRefreshListener.add(checkpointRefreshListener); + if (this.checkpointPublisher != null && indexSettings.isSegRepEnabled() && shardRouting.primary()) { + internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); } return this.engineConfigFactory.newEngineConfig( @@ -4123,4 +4123,26 @@ RetentionLeaseSyncer getRetentionLeaseSyncer() { public GatedCloseable getSegmentInfosSnapshot() { return getEngine().getSegmentInfosSnapshot(); } + + /** + * With segment replication enabled - prepare the shard's engine to be promoted as the new primary. + * + * If this shard is currently using a replication engine, this method: + * 1. Invokes {@link NRTReplicationEngine#commitSegmentInfos()} to ensure the engine can be reopened as writeable from the latest refresh point. + * InternalEngine opens its IndexWriter from an on-disk commit point, but this replica may have recently synced from a primary's refresh point, meaning it has documents searchable in its in-memory SegmentInfos + * that are not part of a commit point. This ensures that those documents are made part of a commit and do not need to be reindexed after promotion. + * 2. Invokes resetEngineToGlobalCheckpoint - This call performs the engine swap, opening up as a writeable engine and replays any operations in the xlog. The operations indexed from xlog here will be + * any ack'd writes that were not copied to this replica before promotion. 
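+ *
+ * (Failures in either step are rethrown as an EngineException, failing the shard rather than leaving it half-promoted.)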
+ */ + private void promoteNRTReplicaToPrimary() { + assert shardRouting.primary() && indexSettings.isSegRepEnabled(); + getReplicationEngine().ifPresentOrElse(engine -> { + try { + engine.commitSegmentInfos(); + resetEngineToGlobalCheckpoint(); + } catch (IOException e) { + throw new EngineException(shardId, "Unable to update replica to writeable engine, failing shard", e); + } + }, () -> { throw new EngineException(shardId, "Expected replica engine to be of type NRTReplicationEngine"); }); + } } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 163717ad94c2c..58598ab2d08f4 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -121,6 +121,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; /** * A Store provides plain access to files written by an opensearch index shard. Each shard @@ -799,6 +800,47 @@ public void beforeClose() { shardLock.setDetails("closing shard"); } + /** + * This method should only be used with Segment Replication. + * Perform a commit from a live {@link SegmentInfos}. Replica engines with segrep do not have an IndexWriter and Lucene does not currently + * have the ability to create a writer directly from a SegmentInfos object. To promote the replica as a primary and avoid reindexing, we must first commit + * on the replica so that it can be opened with a writeable engine. Further, InternalEngine currently invokes `trimUnsafeCommits` which reverts the engine to a previous safeCommit where the max seqNo is less than or equal + * to the current global checkpoint. It is likely that the replica has a maxSeqNo that is higher than the global cp and a new commit will be wiped. + * + * To get around these limitations, this method first creates an IndexCommit directly from SegmentInfos, it then + * uses an appending IW to create an IndexCommit from the commit created on SegmentInfos. + * This ensures that 1. All files in the new commit are fsynced and 2. Deletes older commit points so the only commit to start from is our new commit. + * + * @param latestSegmentInfos {@link SegmentInfos} The latest active infos + * @param maxSeqNo The engine's current maxSeqNo + * @param processedCheckpoint The engine's current processed checkpoint. + * @throws IOException when there is an IO error committing. + */ + public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, long processedCheckpoint) throws IOException { + assert indexSettings.isSegRepEnabled(); + metadataLock.writeLock().lock(); + try { + final Map userData = new HashMap<>(latestSegmentInfos.getUserData()); + userData.put(LOCAL_CHECKPOINT_KEY, String.valueOf(processedCheckpoint)); + userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); + latestSegmentInfos.setUserData(userData, true); + latestSegmentInfos.commit(directory()); + + // similar to TrimUnsafeCommits, create a commit with an appending IW, this will delete old commits and ensure all files + // associated with the SegmentInfos.commit are fsynced. 
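+ // (newAppendingIndexWriter opens the IndexWriter in OpenMode.APPEND on that last commit,
+ // so the commit below fsyncs and re-commits the same point instead of starting a new index)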
+ final List existingCommits = DirectoryReader.listCommits(directory); + assert existingCommits.isEmpty() == false : "Expected at least one commit but none found"; + final IndexCommit lastIndexCommit = existingCommits.get(existingCommits.size() - 1); + assert latestSegmentInfos.getSegmentsFileName().equals(lastIndexCommit.getSegmentsFileName()); + try (IndexWriter writer = newAppendingIndexWriter(directory, lastIndexCommit)) { + writer.setLiveCommitData(lastIndexCommit.getUserData().entrySet()); + writer.commit(); + } + } finally { + metadataLock.writeLock().unlock(); + } + } + /** * A store directory * diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 675ff860c3334..1fe1a37dedae0 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -12,18 +12,25 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.hamcrest.MatcherAssert; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.Queries; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ParsedDocument; +import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -31,6 +38,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; +import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; public class NRTReplicationEngineTests extends EngineTestCase { @@ -210,6 +219,49 @@ public void testTrimTranslogOps() throws Exception { } } + public void testCommitSegmentInfos() throws Exception { + // This test asserts that NRTReplication#commitSegmentInfos creates a new commit point with the latest checkpoints + // stored in user data. 
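+ // ("checkpoints stored in user data" means the LOCAL_CHECKPOINT_KEY and MAX_SEQ_NO
+ // entries that Store#commitSegmentInfos stamps into the segments_N user data; see the asserts below)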
+ final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "index", + Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() + ); + try ( + final Store nrtEngineStore = createStore(indexSettings, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) + ) { + List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()) + .stream() + .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX)) + .collect(Collectors.toList()); + for (Engine.Operation op : operations) { + applyOperation(nrtEngine, op); + } + + final SegmentInfos previousInfos = nrtEngine.getLatestSegmentInfos(); + LocalCheckpointTracker localCheckpointTracker = nrtEngine.getLocalCheckpointTracker(); + final long maxSeqNo = localCheckpointTracker.getMaxSeqNo(); + final long processedCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); + nrtEngine.commitSegmentInfos(); + + // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata. + final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos(); + assertEquals(previousInfos.getGeneration(), latestSegmentInfos.getLastGeneration()); + Map userData = latestSegmentInfos.getUserData(); + assertEquals(processedCheckpoint, localCheckpointTracker.getProcessedCheckpoint()); + assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO))); + assertEquals(processedCheckpoint, Long.parseLong(userData.get(LOCAL_CHECKPOINT_KEY))); + + // read infos from store and assert userdata + final String lastCommitSegmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(nrtEngineStore.directory()); + final SegmentInfos committedInfos = SegmentInfos.readCommit(nrtEngineStore.directory(), lastCommitSegmentsFileName); + userData = committedInfos.getUserData(); + assertEquals(processedCheckpoint, Long.parseLong(userData.get(LOCAL_CHECKPOINT_KEY))); + assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO))); + } + } + private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine, SegmentInfos expectedSegmentInfos) throws IOException { assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint()); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 4f6d86c13e12c..23371a39871c7 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -16,6 +16,8 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.InternalEngine; +import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; @@ -26,10 +28,12 @@ import java.io.IOException; import java.util.List; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.verify; +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.equalTo; +import static 
org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -177,4 +181,76 @@ public void testPublishCheckpointAfterRelocationHandOff() throws IOException { closeShards(shard); } + public void testNRTReplicaPromotedAsPrimary() throws Exception { + try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + final IndexShard replica = shards.getReplicas().get(1); + + // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. + final int numDocs = shards.indexDocs(randomInt(10)); + + // refresh and copy the segments over. + oldPrimary.refresh("Test"); + replicateSegments(oldPrimary, shards.getReplicas()); + + // at this point both shards should have numDocs persisted and searchable. + assertDocCounts(oldPrimary, numDocs, numDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, numDocs, numDocs); + } + + // 2. Create ops that are in the replica's xlog, not in the index. + // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs + // persisted. + final int totalDocs = numDocs + shards.indexDocs(randomInt(10)); + + assertDocCounts(oldPrimary, totalDocs, totalDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, totalDocs, numDocs); + } + + // promote the replica + shards.syncGlobalCheckpoint(); + assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations()); + shards.promoteReplicaToPrimary(nextPrimary); + + // close and start the oldPrimary as a replica. + oldPrimary.close("demoted", false); + oldPrimary.store().close(); + oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(oldPrimary); + + assertEquals(NRTReplicationEngine.class, oldPrimary.getEngine().getClass()); + assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); + assertDocCounts(nextPrimary, totalDocs, totalDocs); + assertEquals(0, nextPrimary.translogStats().estimatedNumberOfOperations()); + + // refresh and push segments to our other replica. + nextPrimary.refresh("test"); + replicateSegments(nextPrimary, asList(replica)); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + /** + * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because + * it asserts point in time seqNos are relative to the doc counts. + */ + private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCount, int expectedSearchableDocCount) throws IOException { + assertDocCount(indexShard, expectedSearchableDocCount); + // assigned seqNos start at 0, so assert max & local seqNos are 1 less than our persisted doc count. 
+ assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getMaxSeqNo()); + assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getLocalCheckpoint()); + // processed cp should be 1 less than our searchable doc count. + assertEquals(expectedSearchableDocCount - 1, indexShard.getProcessedLocalCheckpoint()); + } } From a2ba3a8c6662b0bac5fc3d73c5029fe323f1192b Mon Sep 17 00:00:00 2001 From: Kartik Ganesh Date: Wed, 17 Aug 2022 14:27:07 -0700 Subject: [PATCH 095/104] Added timing data and more granular stages to SegmentReplicationState (#4222) * Added timing data and more granular stages to SegmentReplicationState This change introduces instrumentation logging that measures the latency of the various stages of segment replication as seen by each replica. Logs have also been added to the source node for checkpoint publishing and checkpoint metadata responses. All logging is currently at the TRACE level. Signed-off-by: Kartik Ganesh * Fixing SegmentReplicationTarget tests Signed-off-by: Kartik Ganesh * Incorporated PR feedback Signed-off-by: Kartik Ganesh * Fixing SegmentReplicationTargetService tests Signed-off-by: Kartik Ganesh Signed-off-by: Kartik Ganesh --- .../SegmentReplicationSourceHandler.java | 18 ++++- .../SegmentReplicationSourceService.java | 14 ++++ .../replication/SegmentReplicationState.java | 71 +++++++++++++++---- .../replication/SegmentReplicationTarget.java | 19 +++-- .../SegmentReplicationTargetService.java | 22 +++++- .../checkpoint/PublishCheckpointAction.java | 24 ++++++- .../SegmentReplicationTargetServiceTests.java | 21 ++++-- .../SegmentReplicationTargetTests.java | 20 ++++-- 8 files changed, 175 insertions(+), 34 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index ce764900e433f..2d21653c1924c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -27,6 +27,7 @@ import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.MultiChunkTransfer; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transports; @@ -104,16 +105,24 @@ class SegmentReplicationSourceHandler { * @param listener {@link ActionListener} that completes with the list of files sent. 
*/ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListener listener) { + final ReplicationTimer timer = new ReplicationTimer(); if (isReplicating.compareAndSet(false, true) == false) { throw new OpenSearchException("Replication to {} is already running.", shard.shardId()); } future.addListener(listener, OpenSearchExecutors.newDirectExecutorService()); final Closeable releaseResources = () -> IOUtils.close(resources); try { - + timer.start(); final Consumer onFailure = e -> { assert Transports.assertNotTransportThread(SegmentReplicationSourceHandler.this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + timer.stop(); + logger.trace( + "[replication id {}] Source node failed to send files to target node [{}], timing: {}", + request.getReplicationId(), + request.getTargetNode().getId(), + timer.time() + ); }; RunUnderPrimaryPermit.run(() -> { @@ -151,6 +160,13 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene future.onResponse(new GetSegmentFilesResponse(List.of(storeFileMetadata))); } finally { IOUtils.close(resources); + timer.stop(); + logger.trace( + "[replication id {}] Source node completed sending files to target node [{}], timing: {}", + request.getReplicationId(), + request.getTargetNode().getId(), + timer.time() + ); } }, onFailure); } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index d428459884f97..0cee731fde2cb 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; @@ -25,6 +26,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; @@ -86,6 +88,8 @@ public SegmentReplicationSourceService( private class CheckpointInfoRequestHandler implements TransportRequestHandler { @Override public void messageReceived(CheckpointInfoRequest request, TransportChannel channel, Task task) throws Exception { + final ReplicationTimer timer = new ReplicationTimer(); + timer.start(); final RemoteSegmentFileChunkWriter segmentSegmentFileChunkWriter = new RemoteSegmentFileChunkWriter( request.getReplicationId(), recoverySettings, @@ -109,6 +113,16 @@ public void messageReceived(CheckpointInfoRequest request, TransportChannel chan copyState.getPendingDeleteFiles() ) ); + timer.stop(); + logger.trace( + new ParameterizedMessage( + "[replication id {}] Source node sent checkpoint info [{}] to target node [{}], timing: {}", + request.getReplicationId(), + copyState.getCheckpoint(), + request.getTargetNode().getId(), + timer.time() + ) + ); } } diff --git 
a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index 838c06a4785ef..f865ba1332186 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -8,10 +8,14 @@ package org.opensearch.indices.replication; +import org.opensearch.common.collect.Tuple; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationTimer; +import java.util.ArrayList; +import java.util.List; + /** * ReplicationState implementation to track Segment Replication events. * @@ -26,10 +30,12 @@ public class SegmentReplicationState implements ReplicationState { */ public enum Stage { DONE((byte) 0), - INIT((byte) 1), - - REPLICATING((byte) 2); + REPLICATING((byte) 2), + GET_CHECKPOINT_INFO((byte) 3), + FILE_DIFF((byte) 4), + GET_FILES((byte) 5), + FINALIZE_REPLICATION((byte) 6); private static final Stage[] STAGES = new Stage[Stage.values().length]; @@ -60,13 +66,27 @@ public static Stage fromId(byte id) { private Stage stage; private final ReplicationLuceneIndex index; - private final ReplicationTimer timer; + private final ReplicationTimer overallTimer; + private final ReplicationTimer stageTimer; + private final List> timingData; + private long replicationId; public SegmentReplicationState(ReplicationLuceneIndex index) { stage = Stage.INIT; this.index = index; - timer = new ReplicationTimer(); - timer.start(); + // Timing data will have as many entries as stages, plus one + // additional entry for the overall timer + timingData = new ArrayList<>(Stage.values().length + 1); + overallTimer = new ReplicationTimer(); + stageTimer = new ReplicationTimer(); + stageTimer.start(); + // set an invalid value by default + this.replicationId = -1L; + } + + public SegmentReplicationState(ReplicationLuceneIndex index, long replicationId) { + this(index); + this.replicationId = replicationId; } @Override @@ -74,9 +94,17 @@ public ReplicationLuceneIndex getIndex() { return index; } + public long getReplicationId() { + return replicationId; + } + @Override public ReplicationTimer getTimer() { - return timer; + return overallTimer; + } + + public List> getTimingData() { + return timingData; } public Stage getStage() { @@ -90,6 +118,12 @@ protected void validateAndSetStage(Stage expected, Stage next) { "can't move replication to stage [" + next + "]. 
current stage: [" + stage + "] (expected [" + expected + "])" ); } + // save the timing data for the current step + stageTimer.stop(); + timingData.add(new Tuple<>(stage.name(), stageTimer.time())); + // restart the step timer + stageTimer.reset(); + stageTimer.start(); stage = next; } @@ -97,16 +131,29 @@ public void setStage(Stage stage) { switch (stage) { case INIT: this.stage = Stage.INIT; - getIndex().reset(); break; case REPLICATING: validateAndSetStage(Stage.INIT, stage); - getIndex().start(); + // only start the overall timer once we've started replication + overallTimer.start(); break; - case DONE: + case GET_CHECKPOINT_INFO: validateAndSetStage(Stage.REPLICATING, stage); - getIndex().stop(); - getTimer().stop(); + break; + case FILE_DIFF: + validateAndSetStage(Stage.GET_CHECKPOINT_INFO, stage); + break; + case GET_FILES: + validateAndSetStage(Stage.FILE_DIFF, stage); + break; + case FINALIZE_REPLICATION: + validateAndSetStage(Stage.GET_FILES, stage); + break; + case DONE: + validateAndSetStage(Stage.FINALIZE_REPLICATION, stage); + // add the overall timing data + overallTimer.stop(); + timingData.add(new Tuple<>("OVERALL", overallTimer.time())); break; default: throw new IllegalArgumentException("unknown SegmentReplicationState.Stage [" + stage + "]"); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 73d9a2f805d75..a658ffc09d590 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -64,7 +64,7 @@ public SegmentReplicationTarget( super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); this.checkpoint = checkpoint; this.source = source; - this.state = new SegmentReplicationState(stateIndex); + this.state = new SegmentReplicationState(stateIndex, getId()); this.multiFileWriter = new MultiFileWriter(indexShard.store(), stateIndex, getPrefix(), logger, this::ensureRefCount); } @@ -139,7 +139,9 @@ public void startReplication(ActionListener listener) { final StepListener getFilesListener = new StepListener<>(); final StepListener finalizeListener = new StepListener<>(); + logger.trace("[shardId {}] Replica starting replication [id {}]", shardId().getId(), getId()); // Get list of files to copy from this checkpoint. + state.setStage(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO); source.getCheckpointMetadata(getId(), checkpoint, checkpointInfoListener); checkpointInfoListener.whenComplete(checkpointInfo -> getFiles(checkpointInfo, getFilesListener), listener::onFailure); @@ -152,14 +154,16 @@ public void startReplication(ActionListener listener) { private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener getFilesListener) throws IOException { + state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.MetadataSnapshot snapshot = checkpointInfo.getSnapshot(); Store.MetadataSnapshot localMetadata = getMetadataSnapshot(); final Store.RecoveryDiff diff = snapshot.segmentReplicationDiff(localMetadata); - logger.debug("Replication diff {}", diff); - // Segments are immutable. 
So if the replica has any segments with the same name that differ from the one in the incoming snapshot - // from - // source that means the local copy of the segment has been corrupted/changed in some way and we throw an IllegalStateException to - // fail the shard + logger.trace("Replication diff {}", diff); + /* + * Segments are immutable. So if the replica has any segments with the same name that differ from the one in the incoming + * snapshot from source that means the local copy of the segment has been corrupted/changed in some way and we throw an + * IllegalStateException to fail the shard + */ if (diff.different.isEmpty() == false) { getFilesListener.onFailure( new IllegalStateException( @@ -177,15 +181,18 @@ private void getFiles(CheckpointInfoResponse checkpointInfo, StepListener listener) { + state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); ActionListener.completeWith(listener, () -> { multiFileWriter.renameAllTempFiles(); final Store store = store(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index f699f0edba842..a79ce195ad83b 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -116,7 +116,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { - + logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); // Checks if received checkpoint is already present and ahead then it replaces old received checkpoint if (latestReceivedCheckpoint.get(replicaShard.shardId()) != null) { if (receivedCheckpoint.isAheadOf(latestReceivedCheckpoint.get(replicaShard.shardId()))) { @@ -139,6 +139,14 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe startReplication(receivedCheckpoint, replicaShard, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication complete, timing data: {}", + replicaShard.shardId().getId(), + state.getReplicationId(), + state.getTimingData() + ) + ); // if we received a checkpoint during the copy event that is ahead of this // try and process it. 
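// i.e. compare the newest checkpoint received from the primary with the checkpoint this replica
// has applied so far; if a newer one arrived while the copy event was running, start another round.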
if (latestReceivedCheckpoint.get(replicaShard.shardId()).isAheadOf(replicaShard.getLatestReplicationCheckpoint())) { @@ -154,6 +162,14 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication failed, timing data: {}", + replicaShard.shardId().getId(), + state.getReplicationId(), + state.getTimingData() + ) + ); if (sendShardFailure == true) { logger.error("replication failure", e); replicaShard.failShard("replication failure", e); @@ -172,9 +188,9 @@ public void startReplication( startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); } - public void startReplication(final SegmentReplicationTarget target) { + // pkg-private for integration tests + void startReplication(final SegmentReplicationTarget target) { final long replicationId = onGoingReplications.start(target, recoverySettings.activityTimeout()); - logger.trace(() -> new ParameterizedMessage("Starting replication {}", replicationId)); threadPool.generic().execute(new ReplicationRunner(replicationId)); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 8093b6aee88f9..cc51082639cdb 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -29,6 +29,7 @@ import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -103,7 +104,10 @@ final void publish(IndexShard indexShard) { // we have to execute under the system context so that if security is enabled the sync is authorized threadContext.markAsSystemContext(); PublishCheckpointRequest request = new PublishCheckpointRequest(indexShard.getLatestReplicationCheckpoint()); + final ReplicationCheckpoint checkpoint = request.getCheckpoint(); final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); + final ReplicationTimer timer = new ReplicationTimer(); + timer.start(); transportService.sendChildRequest( clusterService.localNode(), transportPrimaryAction, @@ -123,12 +127,23 @@ public String executor() { @Override public void handleResponse(ReplicationResponse response) { + timer.stop(); + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] Completed publishing checkpoint [{}], timing: {}", + indexShard.shardId().getId(), + checkpoint, + timer.time() + ) + ); task.setPhase("finished"); taskManager.unregister(task); } @Override public void handleException(TransportException e) { + timer.stop(); + logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { @@ -151,6 +166,13 @@ public void handleException(TransportException e) { } } 
); + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] Publishing replication checkpoint [{}]", + checkpoint.getShardId().getId(), + checkpoint + ) + ); } } @@ -168,7 +190,7 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh Objects.requireNonNull(request); Objects.requireNonNull(replica); ActionListener.completeWith(listener, () -> { - logger.trace("Checkpoint received on replica {}", request); + logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); if (request.getCheckpoint().getShardId().equals(replica.shardId())) { replicationService.onNewCheckpoint(request.getCheckpoint(), replica); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 004fa7f614ef1..d3a6d1a97dacc 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -100,8 +100,8 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept ); final SegmentReplicationTarget spy = Mockito.spy(target); doAnswer(invocation -> { - // setting stage to REPLICATING so transition in markAsDone succeeds on listener completion - target.state().setStage(SegmentReplicationState.Stage.REPLICATING); + // set up stage correctly so the transition in markAsDone succeeds on listener completion + moveTargetToFinalStage(target); final ActionListener listener = invocation.getArgument(0); listener.onResponse(null); return null; @@ -123,7 +123,7 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { - assertEquals(SegmentReplicationState.Stage.REPLICATING, state.getStage()); + assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); assertEquals(expectedError, e.getCause()); assertTrue(sendShardFailure); } @@ -131,8 +131,6 @@ public void onReplicationFailure(SegmentReplicationState state, OpenSearchExcept ); final SegmentReplicationTarget spy = Mockito.spy(target); doAnswer(invocation -> { - // setting stage to REPLICATING so transition in markAsDone succeeds on listener completion - target.state().setStage(SegmentReplicationState.Stage.REPLICATING); final ActionListener listener = invocation.getArgument(0); listener.onFailure(expectedError); return null; @@ -271,4 +269,17 @@ public void testBeforeIndexShardClosed_CancelsOngoingReplications() { sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY); verify(spy, times(1)).cancel(any()); } + + /** + * Move the {@link SegmentReplicationTarget} object through its {@link SegmentReplicationState.Stage} values in order + * until the final, non-terminal stage. 
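+ * <p>
+ * Per the {@code setStage} transitions added in this patch, this helper is equivalent to calling,
+ * in order (a sketch; {@code DONE} and {@code INIT} are skipped):
+ * {@code
+ * state.setStage(Stage.REPLICATING);          // INIT -> REPLICATING, starts the overall timer
+ * state.setStage(Stage.GET_CHECKPOINT_INFO);
+ * state.setStage(Stage.FILE_DIFF);
+ * state.setStage(Stage.GET_FILES);
+ * state.setStage(Stage.FINALIZE_REPLICATION); // last non-terminal stage; a later DONE stops the overall timer
+ * }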
+ */ + private void moveTargetToFinalStage(SegmentReplicationTarget target) { + SegmentReplicationState.Stage[] stageValues = SegmentReplicationState.Stage.values(); + assertEquals(target.state().getStage(), SegmentReplicationState.Stage.INIT); + // Skip the first two stages (DONE and INIT) and iterate until the last value + for (int i = 2; i < stageValues.length; i++) { + target.state().setStage(stageValues[i]); + } + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 1157c463785ac..11217a46b3c69 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -28,6 +28,7 @@ import org.junit.Assert; import org.mockito.Mockito; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -96,7 +97,7 @@ public class SegmentReplicationTargetTests extends IndexShardTestCase { Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build() ); - SegmentInfos testSegmentInfos; + private SegmentInfos testSegmentInfos; @Override public void setUp() throws Exception { @@ -162,6 +163,7 @@ public void getSegmentFiles( public void onResponse(Void replicationResponse) { try { verify(spyIndexShard, times(1)).finalizeReplication(any(), anyLong()); + segrepTarget.markAsDone(); } catch (IOException ex) { Assert.fail(); } @@ -169,7 +171,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { - logger.error("Unexpected test error", e); + logger.error("Unexpected onFailure", e); Assert.fail(); } }); @@ -213,6 +215,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); + segrepTarget.fail(new OpenSearchException(e), false); } }); } @@ -255,6 +258,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause().getCause()); + segrepTarget.fail(new OpenSearchException(e), false); } }); } @@ -299,6 +303,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause()); + segrepTarget.fail(new OpenSearchException(e), false); } }); } @@ -343,6 +348,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assertEquals(exception, e.getCause()); + segrepTarget.fail(new OpenSearchException(e), false); } }); } @@ -384,6 +390,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { assert (e instanceof IllegalStateException); + segrepTarget.fail(new OpenSearchException(e), false); } }); } @@ -432,11 +439,13 @@ public void getSegmentFiles( @Override public void onResponse(Void replicationResponse) { logger.info("No error processing checkpoint info"); + segrepTarget.markAsDone(); } @Override public void onFailure(Exception e) { - assert (e instanceof IllegalStateException); + logger.error("Unexpected onFailure", e); + Assert.fail(); } }); } @@ -448,7 +457,7 @@ public void onFailure(Exception e) { * @return * @throws IOException */ - List 
generateStoreMetadataSnapshot(int docCount) throws IOException { + private List generateStoreMetadataSnapshot(int docCount) throws IOException { List docList = new ArrayList<>(); for (int i = 0; i < docCount; i++) { Document document = new Document(); @@ -480,7 +489,7 @@ List generateStoreMetadataSnapshot(int docCount) throws return Arrays.asList(storeMetadata, storeMetadataWithDeletes); } - public static void deleteContent(Directory directory) throws IOException { + private static void deleteContent(Directory directory) throws IOException { final String[] files = directory.listAll(); final List exceptions = new ArrayList<>(); for (String file : files) { @@ -498,7 +507,6 @@ public static void deleteContent(Directory directory) throws IOException { @Override public void tearDown() throws Exception { super.tearDown(); - segrepTarget.markAsDone(); closeShards(spyIndexShard, indexShard); } } From 36f1d77ad7983d9a58bfd755423ce806941fd930 Mon Sep 17 00:00:00 2001 From: Owais Kazi Date: Wed, 17 Aug 2022 17:14:09 -0700 Subject: [PATCH 096/104] Handles the status code for `.` properties (#4246) * Return 400 status code for array out of bound Signed-off-by: Owais Kazi * Spotless apply Signed-off-by: Owais Kazi * PR comments Signed-off-by: Owais Kazi Signed-off-by: Owais Kazi --- .../opensearch/index/mapper/ObjectMapper.java | 4 ++++ .../index/mapper/ObjectMapperTests.java | 20 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index fcbca6049ec0b..1702c7700cf60 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -380,6 +380,10 @@ protected static void parseProperties(ObjectMapper.Builder objBuilder, Map fieldBuilder = typeParser.parse(realFieldName, propNode, parserContext); for (int i = fieldNameParts.length - 2; i >= 0; --i) { diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index 079475d9f3554..d6c89342c9df2 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -178,6 +178,26 @@ public void testFieldsWithFilledArrayShouldThrowException() throws Exception { } } + public void testDotAsFieldName() throws Exception { + String mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(".") + .field("type", "text") + .endObject() + .endObject() + .endObject() + ); + + try { + createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); + fail("Expected MapperParsingException"); + } catch (MapperParsingException e) { + assertThat(e.getMessage(), containsString("Invalid field name")); + } + } + public void testFieldPropertiesArray() throws Exception { String mapping = Strings.toString( XContentFactory.jsonBuilder() From d308a2925818ad89e0a85161bc6ee43c057eb6d8 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 17 Aug 2022 17:25:19 -0700 Subject: [PATCH 097/104] [Segment Replication] Update PrimaryShardAllocator to prefer replicas with higher replication checkpoint (#4041) * [Segment Replication] Update PrimaryShardAllocator to prefer replicas having higher replication checkpoint Signed-off-by: Suraj Singh * Use empty 
replication checkpoint to avoid NPE Signed-off-by: Suraj Singh * Update NodeGatewayStartedShards to optionally wire in/out ReplicationCheckpoint field Signed-off-by: Suraj Singh * Use default replication checkpoint causing EOF errors on empty checkpoint * Add indexSettings to GatewayAllocator to allow ReplicationCheckpoint comparator only for segrep enabled indices * Add unit tests for primary term first replica promotion & comparator fix * Fix NPE on empty IndexMetadata * Remove settings from AllocationService and directly inject in GatewayAllocator * Add more unit tests and minor code clean up Signed-off-by: Suraj Singh * Address review comments & integration test Signed-off-by: Suraj Singh * Fix comparator on null ReplicationCheckpoint Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh --- .../gateway/PrimaryShardAllocator.java | 17 +- ...ransportNodesListGatewayStartedShards.java | 58 ++++- .../checkpoint/ReplicationCheckpoint.java | 7 +- .../gateway/PrimaryShardAllocatorTests.java | 229 +++++++++++++++++- .../test/gateway/TestGatewayAllocator.java | 17 +- 5 files changed, 314 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index b9cfebaa98521..4dc9396751fc9 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -313,6 +313,11 @@ private static ShardStoreInfo shardStoreInfo(NodeGatewayStartedShards nodeShardS NodeGatewayStartedShards::primary ).reversed(); + private static final Comparator HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR = Comparator.comparing( + NodeGatewayStartedShards::replicationCheckpoint, + Comparator.nullsLast(Comparator.naturalOrder()) + ); + /** + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching + * inSyncAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but @@ -381,6 +386,12 @@ protected static NodeShardsResult buildNodeShardsResult( } } + /** + * Orders the active shard copies using the comparators below: + * 1. No store exception, i.e. the shard copy is readable + * 2. Prefer the previous primary shard + * 3. Prefer the shard copy with the highest replication checkpoint. This is a no-op for doc-rep enabled indices. 
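+ * <p>
+ * For illustration, when {@code matchAnyShard} is false the chain built below is equivalent to:
+ * {@code
+ * Comparator<NodeGatewayStartedShards> comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR
+ *     .thenComparing(PRIMARY_FIRST_COMPARATOR)
+ *     .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); // null checkpoints sort last
+ * }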
+ */ final Comparator comparator; // allocation preference if (matchAnyShard) { // prefer shards with matching allocation ids @@ -388,9 +399,11 @@ protected static NodeShardsResult buildNodeShardsResult( (NodeGatewayStartedShards state) -> inSyncAllocationIds.contains(state.allocationId()) ).reversed(); comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR) - .thenComparing(PRIMARY_FIRST_COMPARATOR); + .thenComparing(PRIMARY_FIRST_COMPARATOR) + .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); } else { - comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR); + comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR) + .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); } nodeShardStates.sort(comparator); diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 78b4fa287ef59..953b4def9d653 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; +import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; @@ -56,11 +57,13 @@ import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.shard.ShardStateMetadata; import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -195,6 +198,7 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { clusterService.localNode(), allocationId, shardStateMetadata.primary, + null, exception ); } @@ -202,10 +206,16 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - return new NodeGatewayStartedShards(clusterService.localNode(), allocationId, shardStateMetadata.primary); + final IndexShard shard = indicesService.getShardOrNull(shardId); + return new NodeGatewayStartedShards( + clusterService.localNode(), + allocationId, + shardStateMetadata.primary, + shard != null ? 
shard.getLatestReplicationCheckpoint() : null + ); } logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), null, false); + return new NodeGatewayStartedShards(clusterService.localNode(), null, false, null); } catch (Exception e) { throw new OpenSearchException("failed to load started shards", e); } @@ -349,10 +359,10 @@ public String getCustomDataPath() { * @opensearch.internal */ public static class NodeGatewayStartedShards extends BaseNodeResponse { - private final String allocationId; private final boolean primary; private final Exception storeException; + private final ReplicationCheckpoint replicationCheckpoint; public NodeGatewayStartedShards(StreamInput in) throws IOException { super(in); @@ -363,16 +373,33 @@ public NodeGatewayStartedShards(StreamInput in) throws IOException { } else { storeException = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0) && in.readBoolean()) { + replicationCheckpoint = new ReplicationCheckpoint(in); + } else { + replicationCheckpoint = null; + } } - public NodeGatewayStartedShards(DiscoveryNode node, String allocationId, boolean primary) { - this(node, allocationId, primary, null); + public NodeGatewayStartedShards( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint + ) { + this(node, allocationId, primary, replicationCheckpoint, null); } - public NodeGatewayStartedShards(DiscoveryNode node, String allocationId, boolean primary, Exception storeException) { + public NodeGatewayStartedShards( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + Exception storeException + ) { super(node); this.allocationId = allocationId; this.primary = primary; + this.replicationCheckpoint = replicationCheckpoint; this.storeException = storeException; } @@ -384,6 +411,10 @@ public boolean primary() { return this.primary; } + public ReplicationCheckpoint replicationCheckpoint() { + return this.replicationCheckpoint; + } + public Exception storeException() { return this.storeException; } @@ -399,6 +430,14 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (replicationCheckpoint != null) { + out.writeBoolean(true); + replicationCheckpoint.writeTo(out); + } else { + out.writeBoolean(false); + } + } } @Override @@ -414,7 +453,8 @@ public boolean equals(Object o) { return primary == that.primary && Objects.equals(allocationId, that.allocationId) - && Objects.equals(storeException, that.storeException); + && Objects.equals(storeException, that.storeException) + && Objects.equals(replicationCheckpoint, that.replicationCheckpoint); } @Override @@ -422,6 +462,7 @@ public int hashCode() { int result = (allocationId != null ? allocationId.hashCode() : 0); result = 31 * result + (primary ? 1 : 0); result = 31 * result + (storeException != null ? storeException.hashCode() : 0); + result = 31 * result + (replicationCheckpoint != null ? 
replicationCheckpoint.hashCode() : 0); return result; } @@ -432,6 +473,9 @@ public String toString() { if (storeException != null) { buf.append(",storeException=").append(storeException); } + if (replicationCheckpoint != null) { + buf.append(",ReplicationCheckpoint=").append(replicationCheckpoint.toString()); + } buf.append("]"); return buf.toString(); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 8afb5bd055636..6a4e5e449f178 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -23,7 +23,7 @@ * * @opensearch.internal */ -public class ReplicationCheckpoint implements Writeable { +public class ReplicationCheckpoint implements Writeable, Comparable { private final ShardId shardId; private final long primaryTerm; @@ -107,6 +107,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(segmentInfosVersion); } + @Override + public int compareTo(ReplicationCheckpoint other) { + return this.isAheadOf(other) ? -1 : 1; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java index 4a1ecb9661687..3c39ec9f03b2a 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java @@ -62,6 +62,7 @@ import org.opensearch.common.util.set.Sets; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; @@ -205,6 +206,203 @@ public void testShardLockObtainFailedException() { assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } + /** + * Tests that the replica with the highest primary term will be selected as the target + */ + public void testPreferReplicaWithHighestPrimaryTerm() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 22, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId()) + ); + // Assert node2's allocation id is used + assertThat( + 
allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId2) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that a replica reporting no replication checkpoint is passed over in favor of the replica with the highest checkpoint + */ + public void testPreferReplicaWithNullReplicationCheckpoint() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 20, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 40, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + // Assert node3's allocation id is used as it has the highest replication checkpoint + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId3) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that null ReplicationCheckpoints are ignored + */ + public void testPreferReplicaWithAllNullReplicationCheckpoint() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, null, null); + testAllocator.addData(node2, allocId2, false, null, null); + testAllocator.addData(node3, allocId3, true, null, null); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + // Assert node3's allocation id is used as it was the previous primary + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId3) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that the replica with the highest segment info version is selected as the target when primary terms are equal + */ + public void testPreferReplicaWithHighestSegmentInfoVersion() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, 
allocId1, false, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 3)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId()) + ); + // Assert node2's allocation id is used + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId2) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that allocation prefers a replica at a lower checkpoint when it is in the in-sync set + */ + public void testOutOfSyncHighestRepCheckpointIsIgnored() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId3 + ); + testAllocator.addData(node1, allocId1, false, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node3.getId()) + ); + // Assert node3's allocation id is used + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId3) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + + /** + * Tests that allocation prefers the previous primary over a replica with a higher replication checkpoint + */ + public void testPreferAllocatingPreviousPrimaryWithLowerRepCheckpoint() { + String allocId1 = randomAlphaOfLength(10); + String allocId2 = randomAlphaOfLength(10); + String allocId3 = randomAlphaOfLength(10); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas( + yesAllocationDeciders(), + CLUSTER_RECOVERED, + allocId1, + allocId2, + allocId3 + ); + testAllocator.addData(node1, allocId1, true, new ReplicationCheckpoint(shardId, 10, 10, 101, 1)); + testAllocator.addData(node2, allocId2, false, new ReplicationCheckpoint(shardId, 20, 10, 120, 2)); + testAllocator.addData(node3, allocId3, false, new ReplicationCheckpoint(shardId, 15, 10, 120, 2)); + allocateAllUnassigned(allocation); + assertThat(allocation.routingNodesChanged(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat( + 
allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node1.getId()) + ); + // Assert node1's allocation id is used even though its replication checkpoint is lower, as it was the previous primary + assertThat( + allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId1) + ); + assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); + } + /** * Tests that when one node returns a ShardLockObtainFailedException and another properly loads the store, it will * select the second node as target */ @@ -219,7 +417,7 @@ public void testShardLockObtainFailedExceptionPreferOtherValidCopies() { allocId2 ); testAllocator.addData(node1, allocId1, randomBoolean(), new ShardLockObtainFailedException(shardId, "test")); - testAllocator.addData(node2, allocId2, randomBoolean(), null); + testAllocator.addData(node2, allocId2, randomBoolean()); allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -601,17 +799,42 @@ public TestAllocator clear() { return this; } + public TestAllocator addData( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint + ) { + return addData(node, allocationId, primary, replicationCheckpoint, null); + } + public TestAllocator addData(DiscoveryNode node, String allocationId, boolean primary) { - return addData(node, allocationId, primary, null); + return addData(node, allocationId, primary, ReplicationCheckpoint.empty(shardId), null); } public TestAllocator addData(DiscoveryNode node, String allocationId, boolean primary, @Nullable Exception storeException) { + return addData(node, allocationId, primary, ReplicationCheckpoint.empty(shardId), storeException); + } + + public TestAllocator addData( + DiscoveryNode node, + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + @Nullable Exception storeException + ) { if (data == null) { data = new HashMap<>(); } data.put( node, - new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, allocationId, primary, storeException) + new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards( + node, + allocationId, + primary, + replicationCheckpoint, + storeException + ) ); return this; } diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index 54c92f4d519aa..a36dc26685eb4 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -43,6 +43,7 @@ import org.opensearch.gateway.ReplicaShardAllocator; import org.opensearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; import java.util.Collections; @@ -71,6 +72,7 @@ public class TestGatewayAllocator extends GatewayAllocator { Map> knownAllocations = new HashMap<>(); DiscoveryNodes currentNodes = DiscoveryNodes.EMPTY_NODES; + Map shardIdNodeToReplicationCheckPointMap = new HashMap<>(); PrimaryShardAllocator primaryShardAllocator = new PrimaryShardAllocator() { @Override @@ -90,7 
+92,8 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR routing -> new NodeGatewayStartedShards( currentNodes.get(routing.currentNodeId()), routing.allocationId().getId(), - routing.primary() + routing.primary(), + getReplicationCheckpoint(shardId, routing.currentNodeId()) ) ) ); @@ -99,6 +102,10 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR } }; + private ReplicationCheckpoint getReplicationCheckpoint(ShardId shardId, String nodeName) { + return shardIdNodeToReplicationCheckPointMap.getOrDefault(getReplicationCheckPointKey(shardId, nodeName), null); + } + ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator() { @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { @@ -156,4 +163,12 @@ public void allocateUnassigned( public void addKnownAllocation(ShardRouting shard) { knownAllocations.computeIfAbsent(shard.currentNodeId(), id -> new HashMap<>()).put(shard.shardId(), shard); } + + public String getReplicationCheckPointKey(ShardId shardId, String nodeName) { + return shardId.toString() + "_" + nodeName; + } + + public void addReplicationCheckpoint(ShardId shardId, String nodeName, ReplicationCheckpoint replicationCheckpoint) { + shardIdNodeToReplicationCheckPointMap.putIfAbsent(getReplicationCheckPointKey(shardId, nodeName), replicationCheckpoint); + } } From ee26e010fcb074f073eaa6bdeeb563c42b382825 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 18 Aug 2022 15:51:26 -0400 Subject: [PATCH 098/104] Fixing NodeGatewayStartedShards bwc (de)serialization issues (#4258) Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 | 1 - .../gateway/TransportNodesListGatewayStartedShards.java | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 diff --git a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 b/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 deleted file mode 100644 index f40f0242448e8..0000000000000 --- a/modules/repository-s3/licenses/commons-logging-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index 953b4def9d653..c43f539243d7a 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -373,7 +373,7 @@ public NodeGatewayStartedShards(StreamInput in) throws IOException { } else { storeException = null; } - if (in.getVersion().onOrAfter(Version.V_3_0_0) && in.readBoolean()) { + if (in.getVersion().onOrAfter(Version.V_2_3_0) && in.readBoolean()) { replicationCheckpoint = new ReplicationCheckpoint(in); } else { replicationCheckpoint = null; @@ -430,7 +430,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_3_0)) { if (replicationCheckpoint != null) { out.writeBoolean(true); replicationCheckpoint.writeTo(out); From 4643620eda3818ef63c474f90dde6814128101c8 Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Sun, 21 Aug 2022 10:52:20 -0700 Subject: [PATCH 099/104] Fix 
AbstractStringFieldDataTestCase tests to account for TotalHits lower bound (#4270) Fixes tests to account for TotalHits uncertainty as of Lucene 9. Signed-off-by: Daniel Widdis --- .../index/fielddata/AbstractStringFieldDataTestCase.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java index 763ee59a385a2..76496491b3ed4 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -52,6 +52,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; @@ -340,7 +341,13 @@ public void testSortMissing(boolean first, boolean reverse) throws IOException { randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField) ); - assertEquals(numDocs, topDocs.totalHits.value); + // As of Lucene 9.0.0, totalHits may be a lower bound + if (topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO) { + assertEquals(numDocs, topDocs.totalHits.value); + } else { + assertTrue(1000 <= topDocs.totalHits.value); + assertTrue(numDocs >= topDocs.totalHits.value); + } BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value"); From 6629b09aad2629fafb7aad998153867c2f3fe472 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Aug 2022 09:40:48 -0400 Subject: [PATCH 100/104] Bump com.gradle.enterprise from 3.10.3 to 3.11.1 (#4273) Bumps com.gradle.enterprise from 3.10.3 to 3.11.1. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- settings.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.gradle b/settings.gradle index 65dc6a13100e2..4c389b5490e7c 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.10.3" + id "com.gradle.enterprise" version "3.11.1" } buildCache { From 54330560a7064434e088dad0eeb61ad0df2cfa6b Mon Sep 17 00:00:00 2001 From: Navneet Verma Date: Mon, 22 Aug 2022 13:03:22 -0700 Subject: [PATCH 101/104] Refactored the src and test of GeoHashGrid and GeoTileGrid Aggregations on GeoPoint from server folder to geo module.(#4071) (#4072) (#4180) The changes also includes: * Updated Search plugin to provide the interface so that plugins can also register the composite aggregations * Added YAML test for the geo_grid, geo_tile and composite aggregation Signed-off-by: Navneet Verma --- .../client/RestHighLevelClient.java | 6 - modules/geo/build.gradle | 9 +- .../aggregations/bucket/GeoHashGridIT.java | 39 +- .../aggregations/bucket/ShardReduceIT.java | 107 +++++ ...actGeoAggregatorModulePluginTestCase.java} | 2 +- ...BoundsIT.java => GeoBoundsITTestCase.java} | 2 +- .../metrics/GeoCentroidITTestCase.java | 84 ++++ .../org/opensearch/geo/GeoModulePlugin.java | 42 +- .../GeoTileGridValuesSourceBuilder.java | 29 +- .../bucket/composite/GeoTileValuesSource.java | 8 +- .../bucket/geogrid/BoundedCellValues.java | 2 +- .../bucket/geogrid/BucketPriorityQueue.java | 2 +- .../bucket/geogrid/CellIdSource.java | 2 +- .../bucket/geogrid/CellValues.java | 2 +- .../aggregations/bucket/geogrid/GeoGrid.java | 2 +- .../geogrid/GeoGridAggregationBuilder.java | 2 +- .../bucket/geogrid/GeoGridAggregator.java | 2 +- .../GeoHashGridAggregationBuilder.java | 4 +- .../bucket/geogrid/GeoHashGridAggregator.java | 2 +- .../geogrid/GeoHashGridAggregatorFactory.java | 2 +- .../GeoTileGridAggregationBuilder.java | 5 +- .../bucket/geogrid/GeoTileGridAggregator.java | 2 +- .../geogrid/GeoTileGridAggregatorFactory.java | 3 +- .../bucket/geogrid/InternalGeoGrid.java | 2 +- .../bucket/geogrid/InternalGeoGridBucket.java | 2 +- .../bucket/geogrid/InternalGeoHashGrid.java | 2 +- .../geogrid/InternalGeoHashGridBucket.java | 2 +- .../bucket/geogrid/InternalGeoTileGrid.java | 2 +- .../geogrid/InternalGeoTileGridBucket.java | 3 +- .../bucket/geogrid/ParsedGeoGrid.java | 2 +- .../bucket/geogrid/ParsedGeoGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoHashGrid.java | 2 +- .../geogrid/ParsedGeoHashGridBucket.java | 2 +- .../bucket/geogrid/ParsedGeoTileGrid.java | 2 +- .../geogrid/ParsedGeoTileGridBucket.java | 3 +- .../bucket/geogrid/UnboundedCellValues.java | 2 +- .../bucket/geogrid/package-info.java | 2 +- .../metrics/GeoGridAggregatorSupplier.java | 4 +- .../GeoHashGridAggregationBuilderTests.java | 21 +- .../GeoTileGridAggregationBuilderTests.java | 23 +- ...idAggregationCompositeAggregatorTests.java | 174 ++++++++ ...eGridCompositeAggregationBuilderTests.java | 50 +++ .../GeoTileGridValuesSourceBuilderTests.java | 3 +- .../geogrid/GeoGridAggregatorTestCase.java | 20 +- .../bucket/geogrid/GeoGridTestCase.java | 39 +- .../geogrid/GeoHashGridAggregatorTests.java | 2 +- .../geogrid/GeoHashGridParserTests.java | 2 +- .../bucket/geogrid/GeoHashGridTests.java | 2 +- .../geogrid/GeoTileGridAggregatorTests.java | 4 +- .../geogrid/GeoTileGridParserTests.java | 3 +- .../bucket/geogrid/GeoTileGridTests.java | 3 +- 
.../geo/tests/common/AggregationBuilders.java | 18 + .../common/AggregationInspectionHelper.java | 5 + .../geo/tests/common/RandomGeoGenerator.java | 11 + .../test/geo_shape/230_composite.yml | 168 ++++++++ .../test/geo_shape}/280_geohash_grid.yml | 0 .../test/geo_shape}/290_geotile_grid.yml | 0 .../test/search.aggregation/230_composite.yml | 86 ---- .../aggregations/bucket/ShardReduceIT.java | 35 -- .../aggregations/metrics/GeoCentroidIT.java | 33 -- .../org/opensearch/plugins/SearchPlugin.java | 82 ++++ .../org/opensearch/search/DocValueFormat.java | 2 +- .../org/opensearch/search/SearchModule.java | 22 +- .../aggregations/AggregationBuilders.java | 18 - .../bucket/{geogrid => }/GeoTileUtils.java | 8 +- .../CompositeAggregationBuilder.java | 50 ++- .../CompositeAggregationParsingFunction.java | 22 + .../CompositeValuesSourceBuilder.java | 6 +- .../CompositeValuesSourceConfig.java | 18 +- .../CompositeValuesSourceParserHelper.java | 78 +++- .../bucket/composite/LongValuesSource.java | 6 +- .../SingleDimensionValuesSource.java | 2 +- .../support/AggregationInspectionHelper.java | 5 - .../aggregations/support/package-info.java | 2 +- .../search/DocValueFormatTests.java | 2 +- .../aggregations/AggregationsTests.java | 4 - .../CompositeAggregationBuilderTests.java | 20 +- .../composite/CompositeAggregatorTests.java | 384 +----------------- .../bucket/geogrid/GeoTileUtilsTests.java | 13 +- .../BaseCompositeAggregatorTestCase.java | 310 ++++++++++++++ .../test/InternalAggregationTestCase.java | 6 - 81 files changed, 1395 insertions(+), 761 deletions(-) rename {server/src/internalClusterTest/java/org/opensearch => modules/geo/src/internalClusterTest/java/org/opensearch/geo}/search/aggregations/bucket/GeoHashGridIT.java (89%) create mode 100644 modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java rename modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/{AbstractGeoAggregatorTestCaseModulePlugin.java => AbstractGeoAggregatorModulePluginTestCase.java} (99%) rename modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/{GeoBoundsIT.java => GeoBoundsITTestCase.java} (99%) create mode 100644 modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java (87%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/composite/GeoTileValuesSource.java (88%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/BoundedCellValues.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/BucketPriorityQueue.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/CellIdSource.java (98%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/CellValues.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoGrid.java (96%) rename {server/src/main/java/org/opensearch => 
modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java (99%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoGridAggregator.java (99%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java (98%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java (95%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/InternalGeoGrid.java (99%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java (98%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java (94%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/ParsedGeoGrid.java (97%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java (93%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/UnboundedCellValues.java (96%) rename {server/src/main/java/org/opensearch => modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/package-info.java (79%) rename {server/src/main/java/org/opensearch => 
modules/geo/src/main/java/org/opensearch/geo}/search/aggregations/metrics/GeoGridAggregatorSupplier.java (93%) rename server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java => modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java (72%) rename server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java => modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java (70%) create mode 100644 modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java create mode 100644 modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java (90%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java (95%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoGridTestCase.java (79%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java (96%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java (99%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoHashGridTests.java (97%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java (94%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java (97%) rename {server/src/test/java/org/opensearch => modules/geo/src/test/java/org/opensearch/geo}/search/aggregations/bucket/geogrid/GeoTileGridTests.java (94%) create mode 100644 modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml rename {rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation => modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape}/280_geohash_grid.yml (100%) rename {rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation => modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape}/290_geotile_grid.yml (100%) rename server/src/main/java/org/opensearch/search/aggregations/bucket/{geogrid => }/GeoTileUtils.java (97%) create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java create mode 100644 test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 7ae8f8826c5a4..28a441bdf7f7f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -108,10 
+108,6 @@ import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.opensearch.search.aggregations.bucket.filter.ParsedFilter; import org.opensearch.search.aggregations.bucket.filter.ParsedFilters; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; -import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.global.ParsedGlobal; import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -2130,8 +2126,6 @@ static List getDefaultNamedXContents() { map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); - map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c)); map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index 0b8e623c24ac6..7f687a414e566 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -37,9 +37,16 @@ opensearchplugin { restResources { restApi { - includeCore '_common', 'indices', 'index', 'search' + includeCore '_common', 'indices', 'index', 'search', 'bulk' } } artifacts { restTests(project.file('src/yamlRestTest/resources/rest-api-spec/test')) } +/** + * These compiler arguments need to be removed, as there are raw types being used in the GeoGrid and GeoTile aggregations. + */ +tasks.withType(JavaCompile).configureEach { options.compilerArgs -= '-Xlint:rawtypes' options.compilerArgs -= '-Xlint:unchecked' } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoHashGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java similarity index 89% rename from server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoHashGridIT.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java index 56d918feef9d8..6ab7dd5254679 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java @@ -29,7 +29,7 @@ * GitHub history for details.
*/ -package org.opensearch.search.aggregations.bucket; +package org.opensearch.geo.search.aggregations.bucket; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; @@ -41,12 +41,12 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; +import org.opensearch.geo.tests.common.AggregationBuilders; import org.opensearch.index.query.GeoBoundingBoxQueryBuilder; -import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid.Bucket; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; @@ -57,17 +57,16 @@ import java.util.Random; import java.util.Set; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.geometry.utils.Geohash.PRECISION; import static org.opensearch.geometry.utils.Geohash.stringEncode; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoHashGridIT extends OpenSearchIntegTestCase { +public class GeoHashGridIT extends GeoModulePluginIntegTestCase { @Override protected boolean forbidPrivateIndexSettings() { @@ -158,13 +157,13 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - List buckets = geoGrid.getBuckets(); + List buckets = geoGrid.getBuckets(); Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); for (int i = 0; i < buckets.size(); i++) { @@ -185,7 +184,7 @@ public void testSimple() throws Exception { public void testMultivalued() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("multi_valued_idx") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); @@ -208,8 +207,8 @@ public void testFiltered() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = 
client().prepareSearch("idx") .addAggregation( - AggregationBuilders.filter("filtered", bbox) - .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + org.opensearch.search.aggregations.AggregationBuilders.filter("filtered", bbox) + .subAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) ) .get(); @@ -233,7 +232,7 @@ public void testFiltered() throws Exception { public void testUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); @@ -247,7 +246,7 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").precision(precision)) .get(); assertSearchResponse(response); @@ -267,7 +266,9 @@ public void testPartiallyUnmapped() throws Exception { public void testTopMatch() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision)) + .addAggregation( + AggregationBuilders.geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision) + ) .get(); assertSearchResponse(response); @@ -296,7 +297,7 @@ public void testSizeIsZero() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) .get() ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. Found [0] in [geohashgrid]")); @@ -308,7 +309,7 @@ public void testShardSizeIsZero() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) + .addAggregation(AggregationBuilders.geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) .get() ); assertThat(exception.getMessage(), containsString("[shardSize] must be greater than 0. Found [0] in [geohashgrid]")); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java new file mode 100644 index 0000000000000..5b4dd052a2f65 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.geo.search.aggregations.bucket; + +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.geo.GeoModulePluginIntegTestCase; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; +import org.opensearch.geo.tests.common.AggregationBuilders; +import org.opensearch.geometry.utils.Geohash; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +/** + * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard. + * These tests are based on the date histogram in combination with min_doc_count=0. In order for the date histogram to + * compute empty buckets, its {@code reduce()} method must be called. So by adding the date histogram under other buckets, + * we can make sure that the reduce is properly propagated by checking that empty buckets were created. + */ +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class ShardReduceIT extends GeoModulePluginIntegTestCase { + + private IndexRequestBuilder indexDoc(String date, int value) throws Exception { + return client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("value", value) + .field("ip", "10.0.0."
+ value) + .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION)) + .field("date", date) + .field("term-l", 1) + .field("term-d", 1.5) + .field("term-s", "term") + .startObject("nested") + .field("date", date) + .endObject() + .endObject() + ); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + assertAcked( + prepareCreate("idx").setMapping( + "nested", + "type=nested", + "ip", + "type=ip", + "location", + "type=geo_point", + "term-s", + "type=keyword" + ) + ); + + indexRandom(true, indexDoc("2014-01-01", 1), indexDoc("2014-01-02", 2), indexDoc("2014-01-04", 3)); + ensureSearchable(); + } + + public void testGeoHashGrid() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + AggregationBuilders.geohashGrid("grid") + .field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); + + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + + public void testGeoTileGrid() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + AggregationBuilders.geotileGrid("grid") + .field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); + + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } +} diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java similarity index 99% rename from modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java index 0065cca7d6101..92987d407f51d 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorTestCaseModulePlugin.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java @@ -42,7 +42,7 @@ * to copy the code as we cannot depend on this class. 
* GitHub issue */ -public abstract class AbstractGeoAggregatorTestCaseModulePlugin extends GeoModulePluginIntegTestCase { +public abstract class AbstractGeoAggregatorModulePluginTestCase extends GeoModulePluginIntegTestCase { protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java similarity index 99% rename from modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java rename to modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java index 5cbd98a4936e4..8cc82da12d69a 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java @@ -57,7 +57,7 @@ import static org.opensearch.geo.tests.common.AggregationBuilders.geoBounds; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoBoundsIT extends AbstractGeoAggregatorTestCaseModulePlugin { +public class GeoBoundsITTestCase extends AbstractGeoAggregatorModulePluginTestCase { private static final String aggName = "geoBounds"; public void testSingleValuedField() throws Exception { diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java new file mode 100644 index 0000000000000..e6d45e27b8f70 --- /dev/null +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.geo.search.aggregations.metrics; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; +import org.opensearch.geo.tests.common.AggregationBuilders; +import org.opensearch.search.aggregations.metrics.GeoCentroid; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class GeoCentroidITTestCase extends AbstractGeoAggregatorModulePluginTestCase { + private static final String aggName = "geoCentroid"; + + public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { + SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) + .addAggregation( + AggregationBuilders.geohashGrid("geoGrid") + .field(SINGLE_VALUED_FIELD_NAME) + .subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("geoGrid"); + assertThat(grid, notNullValue()); + assertThat(grid.getName(), equalTo("geoGrid")); + List buckets = grid.getBuckets(); + for (GeoGrid.Bucket cell : buckets) { + String geohash = cell.getKeyAsString(); + GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); + GeoCentroid centroidAgg = cell.getAggregations().get(aggName); + assertThat( + "Geohash " + geohash + " has wrong centroid latitude ", + expectedCentroid.lat(), + closeTo(centroidAgg.centroid().lat(), GEOHASH_TOLERANCE) + ); + assertThat( + "Geohash " + geohash + " has wrong centroid longitude", + expectedCentroid.lon(), + closeTo(centroidAgg.centroid().lon(), GEOHASH_TOLERANCE) + ); + } + } +} diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 64aac66b7eef3..25dcf8db2c407 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -32,6 +32,12 @@ package org.opensearch.geo; +import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; @@ -40,6 +46,7 @@ import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregation; import java.util.Collections; import java.util.List; @@ -57,11 +64,42 @@ public Map getMappers() { */ @Override public List getAggregations() { 
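        // Each AggregationSpec bundles the aggregation's name, its StreamInput constructor and XContent parser,
        // plus a result reader and an aggregator registrar hook for the ValuesSourceRegistry.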
- final AggregationSpec spec = new AggregationSpec( + final AggregationSpec geoBounds = new AggregationSpec( GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new, GeoBoundsAggregationBuilder.PARSER ).addResultReader(InternalGeoBounds::new).setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators); - return Collections.singletonList(spec); + + final AggregationSpec geoHashGrid = new AggregationSpec( + GeoHashGridAggregationBuilder.NAME, + GeoHashGridAggregationBuilder::new, + GeoHashGridAggregationBuilder.PARSER + ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + + final AggregationSpec geoTileGrid = new AggregationSpec( + GeoTileGridAggregationBuilder.NAME, + GeoTileGridAggregationBuilder::new, + GeoTileGridAggregationBuilder.PARSER + ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + return List.of(geoBounds, geoHashGrid, geoTileGrid); + } + + /** + * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * + * @return a {@link List} of {@link CompositeAggregationSpec} + */ + @Override + public List getCompositeAggregations() { + return Collections.singletonList( + new CompositeAggregationSpec( + GeoTileGridValuesSourceBuilder::register, + GeoTileGridValuesSourceBuilder.class, + GeoTileGridValuesSourceBuilder.COMPOSITE_AGGREGATION_SERIALISATION_BYTE_CODE, + GeoTileGridValuesSourceBuilder::new, + GeoTileGridValuesSourceBuilder::parse, + GeoTileGridValuesSourceBuilder.TYPE + ) + ); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java similarity index 87% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index 4b01a08d29a43..84d5943da287f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.composite; +package org.opensearch.geo.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; import org.opensearch.LegacyESVersion; @@ -43,12 +43,15 @@ import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.geo.search.aggregations.bucket.geogrid.CellIdSource; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.aggregations.bucket.geogrid.CellIdSource; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceParserHelper; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; @@ -88,13 +91,19 @@ CompositeValuesSourceConfig apply( ); } - static final String TYPE = "geotile_grid"; + public static final String TYPE = "geotile_grid"; + /* + use the TYPE parameter instead of Byte code. The byte code is added for backward compatibility and will be + removed in the next version. 
+ */ + @Deprecated + public static final Byte COMPOSITE_AGGREGATION_SERIALISATION_BYTE_CODE = 3; static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey( TYPE, GeoTileCompositeSuppier.class ); - private static final ObjectParser PARSER; + static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(GeoTileGridValuesSourceBuilder.TYPE); PARSER.declareInt(GeoTileGridValuesSourceBuilder::precision, new ParseField("precision")); @@ -106,11 +115,11 @@ CompositeValuesSourceConfig apply( CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER); } - static GeoTileGridValuesSourceBuilder parse(String name, XContentParser parser) throws IOException { + public static GeoTileGridValuesSourceBuilder parse(String name, XContentParser parser) throws IOException { return PARSER.parse(parser, new GeoTileGridValuesSourceBuilder(name), null); } - static void register(ValuesSourceRegistry.Builder builder) { + public static void register(ValuesSourceRegistry.Builder builder) { builder.register( REGISTRY_KEY, @@ -163,7 +172,7 @@ static void register(ValuesSourceRegistry.Builder builder) { super(name); } - GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { + public GeoTileGridValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.precision = in.readInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_6_0)) { @@ -203,7 +212,7 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE } @Override - String type() { + protected String type() { return TYPE; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java similarity index 88% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java index 819dfc573bbe4..303e577e99e7b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.composite; +package org.opensearch.geo.search.aggregations.bucket.composite; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; @@ -38,7 +38,9 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.LongValuesSource; +import org.opensearch.search.aggregations.bucket.composite.SingleDimensionValuesSource; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; @@ -68,7 +70,7 @@ class GeoTileValuesSource extends LongValuesSource { } @Override - void setAfter(Comparable value) { + protected void setAfter(Comparable value) { if (missingBucket && value == null) { afterValue = null; } else if (value instanceof Number) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BoundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BoundedCellValues.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BoundedCellValues.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BoundedCellValues.java index ba824fc8f21dd..06d2dcaee3932 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BoundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BoundedCellValues.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.index.fielddata.MultiGeoPointValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index d6cfde0c46eae..70d0552b3e80b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.util.PriorityQueue; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellIdSource.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index 12d9043a2fd5f..d40029e9a762d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellValues.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellValues.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellValues.java index 9dc357659aae8..d01896c8136fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/CellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellValues.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.index.fielddata.AbstractSortingNumericDocValues; import org.opensearch.index.fielddata.MultiGeoPointValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index cfdb08f9ee3d7..4ae888640efc8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index b08c40268c5cf..4a904b3aa2b16 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 1ef8ba6c697f4..909772c61a960 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index 4049bf2c73640..bbaf9613fb216 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoUtils; @@ -40,7 +40,7 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; -import org.opensearch.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.opensearch.geo.search.aggregations.metrics.GeoGridAggregatorSupplier; import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 1106320c7431f..6ca7a4d8a9cb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index cdc801aaedffb..1914c07e831f7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.geometry.utils.Geohash; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java similarity index 95% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index f73360e3cb826..76ad515f34fe5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.io.stream.StreamInput; @@ -39,7 +39,8 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; -import org.opensearch.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.opensearch.geo.search.aggregations.metrics.GeoGridAggregatorSupplier; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index 7a2b908148c4c..a205a9afde41e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index ef8cd11a22498..b830988a3d410 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.index.query.QueryShardContext; @@ -40,6 +40,7 @@ import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.NonCollectingAggregator; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java similarity index 99% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java index 94a5ad5717854..9dbed7b27307a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java similarity index 98% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index a187bfefb661f..93fcdbd098400 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 7811b8774d04f..ff1247300939a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index f9c45dc41ceb1..659909e868651 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java index efbd9a05d6a4d..fa544b5893f0c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java similarity index 94% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java index f200f55232e00..65d736cfceb32 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java @@ -30,11 +30,12 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGrid.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGrid.java index 3f85cf350c89c..adfffeddba59d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGrid.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.CheckedFunction; import org.opensearch.common.xcontent.ObjectParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 08e5c15188ee6..80124cda50b19 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index f20f972c1ce0a..109524e755c4d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 05c7a1c8d1663..4e6e454b08324 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 06915cc4210e1..8734c96a15578 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java similarity index 93% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java index c8dec16f322ef..fd47c35f13de1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java @@ -30,10 +30,11 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/UnboundedCellValues.java similarity index 96% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/UnboundedCellValues.java index f5a139cdb8d9d..c628c7bfdc8ec 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/UnboundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/UnboundedCellValues.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.index.fielddata.MultiGeoPointValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/package-info.java similarity index 79% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/package-info.java index c59685e06cf79..d9183a0f742ef 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/package-info.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/package-info.java @@ -7,4 +7,4 @@ */ /** geo_grid Aggregation package. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoGridAggregatorSupplier.java similarity index 93% rename from server/src/main/java/org/opensearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoGridAggregatorSupplier.java index 183c64f4e4af2..43ccb8b89545a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoGridAggregatorSupplier.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoGridAggregatorSupplier.java @@ -30,13 +30,13 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.metrics; +package org.opensearch.geo.search.aggregations.metrics; import org.opensearch.common.geo.GeoBoundingBox; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregator; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.CardinalityUpperBound; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregator; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java similarity index 72% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java index 5e230a445ec98..00cb162e64c19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridAggregationBuilderTests.java @@ -30,14 +30,23 @@ * GitHub history for details. 
 */ -package org.opensearch.search.aggregations.bucket; +package org.opensearch.geo.search.aggregations.bucket; -import org.opensearch.common.geo.GeoBoundingBoxTests; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -public class GeoHashGridTests extends BaseAggregationTestCase<GeoGridAggregationBuilder> { +import java.util.Collection; +import java.util.Collections; + +public class GeoHashGridAggregationBuilderTests extends BaseAggregationTestCase<GeoGridAggregationBuilder> { + + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } @Override protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { @@ -55,7 +64,7 @@ protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - factory.setGeoBoundingBox(GeoBoundingBoxTests.randomBBox()); + factory.setGeoBoundingBox(RandomGeoGenerator.randomBBox()); } return factory; } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java similarity index 70% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java index d54667fb4f1a6..c7c0be21273bd 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridAggregationBuilderTests.java @@ -30,15 +30,24 @@ * GitHub history for details.
 */ -package org.opensearch.search.aggregations.bucket; +package org.opensearch.geo.search.aggregations.bucket; -import org.opensearch.common.geo.GeoBoundingBoxTests; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; -public class GeoTileGridTests extends BaseAggregationTestCase<GeoGridAggregationBuilder> { +import java.util.Collection; +import java.util.Collections; + +public class GeoTileGridAggregationBuilderTests extends BaseAggregationTestCase<GeoGridAggregationBuilder> { + + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } @Override protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { @@ -55,7 +64,7 @@ protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { factory.shardSize(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - factory.setGeoBoundingBox(GeoBoundingBoxTests.randomBBox()); + factory.setGeoBoundingBox(RandomGeoGenerator.randomBBox()); } return factory; } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java new file mode 100644 index 0000000000000..3c7c292f9d193 --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.geo.search.aggregations.bucket.composite; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.junit.Before; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; +import org.opensearch.index.mapper.GeoPointFieldMapper; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; +import org.opensearch.search.aggregations.composite.BaseCompositeAggregatorTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation.
 + */ +public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { + + protected List<SearchPlugin> getSearchPlugins() { + return Collections.singletonList(new GeoModulePlugin()); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + FIELD_TYPES.add(new GeoPointFieldMapper.GeoPointFieldType("geo_point")); + } + + public void testUnmappedFieldWithGeopoint() throws Exception { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + final String mappedFieldName = "geo_point"; + dataset.addAll( + Arrays.asList( + createDocument(mappedFieldName, new GeoPoint(48.934059, 41.610741)), + createDocument(mappedFieldName, new GeoPoint(-23.065941, 113.610741)), + createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)), + createDocument(mappedFieldName, new GeoPoint(37.2343, -115.8067)), + createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)) + ) + ); + + // just unmapped = no results + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder("name", Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped"))), + (result) -> assertEquals(0, result.getBuckets().size()) + ); + + // unmapped missing bucket = one result + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true)) + ), + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{unmapped=null}", result.afterKey().toString()); + assertEquals("{unmapped=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(5L, result.getBuckets().get(0).getDocCount()); + } + ); + + // field + unmapped, no missing bucket = no results + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), + new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped") + ) + ), + (result) -> assertEquals(0, result.getBuckets().size()) + ); + + // field + unmapped with missing bucket = multiple results + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), + dataset, + () -> new CompositeAggregationBuilder( + "name", + Arrays.asList( + new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), + new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true) + ) + ), + (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{geo_point=7/64/56, unmapped=null}", result.afterKey().toString()); + assertEquals("{geo_point=7/32/56, unmapped=null}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{geo_point=7/64/56, unmapped=null}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + } + ); + + } + + public void testWithGeoPoint() throws Exception { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("geo_point", new GeoPoint(48.934059, 41.610741)), + createDocument("geo_point", new GeoPoint(-23.065941, 113.610741)), + createDocument("geo_point", new GeoPoint(90.0, 0.0)), +
createDocument("geo_point", new GeoPoint(37.2343, -115.8067)), + createDocument("geo_point", new GeoPoint(90.0, 0.0)) + ) + ); + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { + GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); + return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)); + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); + assertEquals("{geo_point=7/32/56}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{geo_point=7/64/56}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + }); + + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { + GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); + return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)).aggregateAfter( + Collections.singletonMap("geo_point", "7/32/56") + ); + }, (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); + assertEquals("{geo_point=7/64/56}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(0).getDocCount()); + }); + } + + @Override + protected boolean addValueToDocument(final Document doc, final String name, final Object value) { + if (value instanceof GeoPoint) { + GeoPoint point = (GeoPoint) value; + doc.add( + new SortedNumericDocValuesField( + name, + GeoTileUtils.longEncode(point.lon(), point.lat(), GeoTileGridAggregationBuilder.DEFAULT_PRECISION) + ) + ); + doc.add(new LatLonPoint(name, point.lat(), point.lon())); + return true; + } + return false; + } +} diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java new file mode 100644 index 0000000000000..ea7a2a83945c2 --- /dev/null +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridCompositeAggregationBuilderTests.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
 + */ + +package org.opensearch.geo.search.aggregations.bucket.composite; + +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.tests.common.RandomGeoGenerator; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.aggregations.BaseAggregationTestCase; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class GeoTileGridCompositeAggregationBuilderTests extends BaseAggregationTestCase<CompositeAggregationBuilder> { + + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singletonList(GeoModulePlugin.class); + } + + private GeoTileGridValuesSourceBuilder randomGeoTileGridValuesSourceBuilder() { + GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); + if (randomBoolean()) { + geoTile.precision(randomIntBetween(0, GeoTileUtils.MAX_ZOOM)); + } + if (randomBoolean()) { + geoTile.geoBoundingBox(RandomGeoGenerator.randomBBox()); + } + return geoTile; + } + + @Override + protected CompositeAggregationBuilder createTestAggregatorBuilder() { + int numSources = randomIntBetween(1, 10); + List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>(); + for (int i = 0; i < numSources; i++) { + sources.add(randomGeoTileGridValuesSourceBuilder()); + } + return new CompositeAggregationBuilder(randomAlphaOfLength(10), sources); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java similarity index 90% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java index 2b1700676f549..c6276c06c4511 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilderTests.java @@ -30,8 +30,9 @@ * GitHub history for details.
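As an aside to the builder test above, the same pieces compose in client code. A minimal sketch; the source name "geo", field name "geo_point", and the after-key value are illustrative, not fixed API values:

    // Sketch only: page over geotile cells with a composite aggregation.
    GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo");
    geoTile.field("geo_point");    // the mapped geo_point field to bucket on
    geoTile.precision(7);          // zoom level, 0..GeoTileUtils.MAX_ZOOM
    CompositeAggregationBuilder composite = new CompositeAggregationBuilder(
        "tiles",
        Collections.singletonList(geoTile)
    );
    // Subsequent pages resume after the last returned bucket key:
    composite.aggregateAfter(Collections.singletonMap("geo", "7/64/56"));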
*/ -package org.opensearch.search.aggregations.bucket.composite; +package org.opensearch.geo.search.aggregations.bucket.composite; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.test.OpenSearchTestCase; public class GeoTileGridValuesSourceBuilderTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java similarity index 95% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 17fddb8978499..d6153637f656d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; @@ -45,17 +45,19 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.geo.GeoBoundingBox; -import org.opensearch.common.geo.GeoBoundingBoxTests; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.tests.common.AggregationInspectionHelper; +import org.opensearch.geo.tests.common.RandomGeoGenerator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.opensearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.ArrayList; @@ -91,6 +93,16 @@ public abstract class GeoGridAggregatorTestCase */ protected abstract GeoGridAggregationBuilder createBuilder(String name); + /** + * Overriding the Search Plugins list with {@link GeoModulePlugin} so that the testcase will know that this plugin is + * to be loaded during the tests. 
 + * @return List of {@link SearchPlugin} + */ + @Override + protected List<SearchPlugin> getSearchPlugins() { + return Collections.singletonList(new GeoModulePlugin()); + } + public void testNoDocs() throws IOException { testCase( new MatchAllDocsQuery(), @@ -225,7 +237,7 @@ public void testBounds() throws IOException { // only consider bounding boxes that are at least GEOHASH_TOLERANCE wide and have quantized coordinates GeoBoundingBox bbox = randomValueOtherThanMany( (b) -> Math.abs(GeoUtils.normalizeLon(b.right()) - GeoUtils.normalizeLon(b.left())) < GEOHASH_TOLERANCE, - GeoBoundingBoxTests::randomBBox + RandomGeoGenerator::randomBBox ); Function<Double, Double> encodeDecodeLat = (lat) -> GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(lat)); Function<Double, Double> encodeDecodeLon = (lon) -> GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(lon)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java similarity index 79% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index ce286a4443660..432736a2b43fe 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -29,9 +29,16 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.apache.lucene.index.IndexWriter; +import org.opensearch.common.ParseField; +import org.opensearch.common.xcontent.ContextParser; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.geo.GeoModulePlugin; +import org.opensearch.geo.search.aggregations.metrics.ParsedGeoBounds; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.ParsedMultiBucketAggregation; import org.opensearch.test.InternalMultiBucketAggregationTestCase; @@ -76,6 +83,36 @@ protected int maxNumberOfBuckets() { return 3; } + /** + * Overriding the method so that tests can get the aggregation specs for namedWriteable. + * + * @return GeoPlugin + */ + @Override + protected SearchPlugin registerPlugin() { + return new GeoModulePlugin(); + } + + /** + * Overriding with the {@link ParsedGeoBounds} so that it can be parsed. We need to do this as {@link GeoModulePlugin} + * is registering this Aggregation.
 + * + * @return a List of {@link NamedXContentRegistry.Entry} + */ + @Override + protected List<NamedXContentRegistry.Entry> getNamedXContents() { + final List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(getDefaultNamedXContents()); + final ContextParser<Object, Aggregation> hashGridParser = (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c); + final ContextParser<Object, Aggregation> geoTileParser = (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c); + namedXContents.add( + new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoHashGridAggregationBuilder.NAME), hashGridParser) + ); + namedXContents.add( + new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(GeoTileGridAggregationBuilder.NAME), geoTileParser) + ); + return namedXContents; + } + @Override protected T createTestInstance(String name, Map<String, Object> metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java similarity index 96% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java index 5c63b15c7f614..04fa815366f6b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import static org.opensearch.geometry.utils.Geohash.stringEncode; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java similarity index 99% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index e81e22b3b562f..44f292e898a61 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -29,7 +29,7 @@ * GitHub history for details.
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java similarity index 97% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index 5a26ec759281c..c84c6ef5ec076 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.geometry.utils.Geohash; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java similarity index 94% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java index 4e88111ac2dfc..f2f641ea794c0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java @@ -30,7 +30,9 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; + +import org.opensearch.search.aggregations.bucket.GeoTileUtils; public class GeoTileGridAggregatorTests extends GeoGridAggregatorTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java similarity index 97% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java index 567bcd57d23e5..a5b000d5e6ab3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.ExceptionsHelper; import org.opensearch.common.xcontent.XContentParseException; @@ -37,6 +37,7 @@ import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Rectangle; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java similarity index 94% rename from server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java rename to modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index 50b9a8cd762d1..ead67e0455d94 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -29,9 +29,10 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.util.List; import java.util.Map; diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c1f27b71c326d..c0d7e51047c6b 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -8,6 +8,10 @@ package org.opensearch.geo.tests.common; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -18,4 +22,18 @@ public class AggregationBuilders { public static GeoBoundsAggregationBuilder geoBounds(String name) { return new GeoBoundsAggregationBuilder(name); } + + /** + * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + */ + public static GeoHashGridAggregationBuilder geohashGrid(String name) { + return new GeoHashGridAggregationBuilder(name); + } + + /** + * Create a new {@link InternalGeoTileGrid} aggregation with the given name. 
+ */ + public static GeoTileGridAggregationBuilder geotileGrid(String name) { + return new GeoTileGridAggregationBuilder(name); + } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 208187bf34a5c..3473cf2d94b76 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,6 +8,7 @@ package org.opensearch.geo.tests.common; +import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -15,4 +16,8 @@ public class AggregationInspectionHelper { public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } + + public static boolean hasValue(InternalGeoGrid agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java index 2cf32c36b97ec..2fb403155e2bc 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGenerator.java @@ -8,7 +8,10 @@ package org.opensearch.geo.tests.common; +import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.geo.GeometryTestUtils; +import org.opensearch.geometry.Rectangle; import java.util.Random; @@ -83,4 +86,12 @@ private static double normalizeLongitude(double longitude) { return -180 + off; } } + + public static GeoBoundingBox randomBBox() { + Rectangle rectangle = GeometryTestUtils.randomRectangle(); + return new GeoBoundingBox( + new GeoPoint(rectangle.getMaxLat(), rectangle.getMinLon()), + new GeoPoint(rectangle.getMinLat(), rectangle.getMaxLon()) + ); + } } diff --git a/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml new file mode 100644 index 0000000000000..211f3c3f46b88 --- /dev/null +++ b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/230_composite.yml @@ -0,0 +1,168 @@ +--- +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + date: + type: date + keyword: + type: keyword + long: + type: long + geo_point: + type: geo_point + nested: + type: nested + properties: + nested_long: + type: long + + - do: + indices.create: + index: other + body: + mappings: + properties: + date: + type: date + long: + type: long + nested: + type: nested + properties: + nested_long: + type: long + + - do: + index: + index: test + id: 1 + body: { "keyword": "foo", "long": [10, 20], "geo_point": "37.2343,-115.8067", "nested": [{"nested_long": 10}, {"nested_long": 20}] } + + - do: + index: + index: test + id: 2 + body: { "keyword": ["foo", "bar"], "geo_point": "41.12,-71.34" } + + - do: + index: + index: test + id: 3 + body: { "keyword": "bar", "long": [100, 0], "geo_point": "90.0,0.0", "nested": [{"nested_long": 10}, {"nested_long": 0}] } + + - do: + index: + index: test + id: 4 + body: { "keyword": "bar", "long": [1000, 0], "geo_point": 
"41.12,-71.34", "nested": [{"nested_long": 1000}, {"nested_long": 20}] } + + - do: + index: + index: test + id: 5 + body: { "date": "2017-10-20T03:08:45" } + + - do: + index: + index: test + id: 6 + body: { "date": "2017-10-21T07:00:00" } + + - do: + index: + index: other + id: 0 + body: { "date": "2017-10-20T03:08:45" } + + - do: + indices.refresh: + index: [test, other] +--- +"Simple Composite aggregation with GeoTile grid": + - skip: + version: " - 7.4.99" + reason: geotile_grid is not supported until 7.5.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "geo": { + "geotile_grid": { + "field": "geo_point", + "precision": 12 + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 4 } + - match: { aggregations.test.buckets.0.key.geo: "12/730/1590" } + - match: { aggregations.test.buckets.0.key.kw: "foo" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.1.key.kw: "bar" } + - match: { aggregations.test.buckets.1.doc_count: 2 } + - match: { aggregations.test.buckets.2.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.2.key.kw: "foo" } + - match: { aggregations.test.buckets.2.doc_count: 1 } + - match: { aggregations.test.buckets.3.key.geo: "12/2048/0" } + - match: { aggregations.test.buckets.3.key.kw: "bar" } + - match: { aggregations.test.buckets.3.doc_count: 1 } + +--- +"Simple Composite aggregation with geotile grid add aggregate after": + - skip: + version: " - 7.4.99" + reason: geotile_grid is not supported until 7.5.0 + - do: + search: + index: test + body: + aggregations: + test: + composite: + sources: [ + "geo": { + "geotile_grid": { + "field": "geo_point", + "precision": 12 + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + after: { "geo": "12/730/1590", "kw": "foo" } + + - match: { hits.total.value: 6 } + - match: { hits.total.relation: "eq" } + - length: { aggregations.test.buckets: 3 } + - match: { aggregations.test.buckets.0.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.0.key.kw: "bar" } + - match: { aggregations.test.buckets.0.doc_count: 2 } + - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } + - match: { aggregations.test.buckets.1.key.kw: "foo" } + - match: { aggregations.test.buckets.1.doc_count: 1 } + - match: { aggregations.test.buckets.2.key.geo: "12/2048/0" } + - match: { aggregations.test.buckets.2.key.kw: "bar" } + - match: { aggregations.test.buckets.2.doc_count: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_geohash_grid.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/280_geohash_grid.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_geohash_grid.yml rename to modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/280_geohash_grid.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml rename to modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/290_geotile_grid.yml diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 2e298441918bc..09278690f5d05 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -651,92 +651,6 @@ setup: } ] ---- -"Simple Composite aggregation with GeoTile grid": - - skip: - version: " - 7.4.99" - reason: geotile_grid is not supported until 7.5.0 - - do: - search: - rest_total_hits_as_int: true - index: test - body: - aggregations: - test: - composite: - sources: [ - "geo": { - "geotile_grid": { - "field": "geo_point", - "precision": 12 - } - }, - { - "kw": { - "terms": { - "field": "keyword" - } - } - } - ] - - - match: {hits.total: 6} - - length: { aggregations.test.buckets: 4 } - - match: { aggregations.test.buckets.0.key.geo: "12/730/1590" } - - match: { aggregations.test.buckets.0.key.kw: "foo" } - - match: { aggregations.test.buckets.0.doc_count: 1 } - - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.1.key.kw: "bar" } - - match: { aggregations.test.buckets.1.doc_count: 2 } - - match: { aggregations.test.buckets.2.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.2.key.kw: "foo" } - - match: { aggregations.test.buckets.2.doc_count: 1 } - - match: { aggregations.test.buckets.3.key.geo: "12/2048/0" } - - match: { aggregations.test.buckets.3.key.kw: "bar" } - - match: { aggregations.test.buckets.3.doc_count: 1 } - ---- -"Simple Composite aggregation with geotile grid add aggregate after": - - skip: - version: " - 7.4.99" - reason: geotile_grid is not supported until 7.5.0 - - do: - search: - index: test - body: - aggregations: - test: - composite: - sources: [ - "geo": { - "geotile_grid": { - "field": "geo_point", - "precision": 12 - } - }, - { - "kw": { - "terms": { - "field": "keyword" - } - } - } - ] - after: { "geo": "12/730/1590", "kw": "foo" } - - - match: { hits.total.value: 6 } - - match: { hits.total.relation: "eq" } - - length: { aggregations.test.buckets: 3 } - - match: { aggregations.test.buckets.0.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.0.key.kw: "bar" } - - match: { aggregations.test.buckets.0.doc_count: 2 } - - match: { aggregations.test.buckets.1.key.geo: "12/1236/1533" } - - match: { aggregations.test.buckets.1.key.kw: "foo" } - - match: { aggregations.test.buckets.1.doc_count: 1 } - - match: { aggregations.test.buckets.2.key.geo: "12/2048/0" } - - match: { aggregations.test.buckets.2.key.kw: "bar" } - - match: { aggregations.test.buckets.2.doc_count: 1 } - --- "Mixed ip and unmapped fields": - skip: diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java index 7352dc7170a21..faa6a54394b00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java @@ -37,7 +37,6 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.filter.Filter; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid; import 
org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -51,8 +50,6 @@ import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; -import static org.opensearch.search.aggregations.AggregationBuilders.geohashGrid; -import static org.opensearch.search.aggregations.AggregationBuilders.geotileGrid; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.ipRange; @@ -338,36 +335,4 @@ public void testDateHistogram() throws Exception { } - public void testGeoHashGrid() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geohashGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertSearchResponse(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); - } - - public void testGeoTileGrid() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geotileGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertSearchResponse(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java index 7cd8b3ed39051..ffc31b7cdb7c4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java @@ -35,15 +35,11 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; import org.opensearch.search.aggregations.InternalAggregation; -import org.opensearch.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.test.OpenSearchIntegTestCase; -import java.util.List; - import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; -import static org.opensearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; @@ -168,33 +164,4 @@ public void testMultiValuedField() throws Exception { assertThat(centroid.lon(), closeTo(multiCentroid.lon(), 
GEOHASH_TOLERANCE)); assertEquals(2 * numDocs, geoCentroid.count()); } - - public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { - SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) - .addAggregation( - geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME).subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) - ) - .get(); - assertSearchResponse(response); - - GeoGrid grid = response.getAggregations().get("geoGrid"); - assertThat(grid, notNullValue()); - assertThat(grid.getName(), equalTo("geoGrid")); - List<GeoGrid.Bucket> buckets = grid.getBuckets(); - for (GeoGrid.Bucket cell : buckets) { - String geohash = cell.getKeyAsString(); - GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); - GeoCentroid centroidAgg = cell.getAggregations().get(aggName); - assertThat( - "Geohash " + geohash + " has wrong centroid latitude ", - expectedCentroid.lat(), - closeTo(centroidAgg.centroid().lat(), GEOHASH_TOLERANCE) - ); - assertThat( - "Geohash " + geohash + " has wrong centroid longitude", - expectedCentroid.lon(), - closeTo(centroidAgg.centroid().lon(), GEOHASH_TOLERANCE) - ); - } - } } diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index a743360e1e90c..af7d4fc2e9fe5 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -55,6 +55,9 @@ import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.PipelineAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationParsingFunction; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.search.aggregations.pipeline.MovAvgModel; @@ -172,6 +175,15 @@ default List<Consumer<ValuesSourceRegistry.Builder>> getAggregationExtentions() return emptyList(); } + /** + * Allows plugins to register new Aggregation in the {@link CompositeAggregation}. + * + * @return A {@link List} of {@link CompositeAggregationSpec} + */ + default List<CompositeAggregationSpec> getCompositeAggregations() { + return emptyList(); + } + /** * The new {@link PipelineAggregator}s added by this plugin.
 */ @@ -532,6 +544,76 @@ public AggregationSpec setAggregatorRegistrar(Consumer<ValuesSourceRegistry.Builder> aggregatorRegistrar) { + /** + * Specification for registering an aggregation in Composite Aggregation + */ + class CompositeAggregationSpec { + private final Consumer<ValuesSourceRegistry.Builder> aggregatorRegistrar; + private final Class<?> valueSourceBuilderClass; + @Deprecated + /** This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of + * byte code + */ + private Byte byteCode; + private final CompositeAggregationParsingFunction parsingFunction; + private final String aggregationType; + private final Writeable.Reader<? extends CompositeValuesSourceBuilder<?>> reader; + + /** + * Specification for registering an aggregation in Composite Aggregation + * + * @param aggregatorRegistrar function to register the + * {@link org.opensearch.search.aggregations.support.ValuesSource} to aggregator mappings for Composite + * aggregation + * @param valueSourceBuilderClass ValueSourceBuilder class name which is building the aggregation + * @param byteCode byte code which is used in serialisation and de-serialisation to identify which + * aggregation builder to use + * @param reader Typically, a reference to a constructor that takes a {@link StreamInput}, which is + * registered with the aggregation + * @param parsingFunction a reference function which will be used to parse the Aggregation input. + * @param aggregationType a {@link String} defined in the AggregationBuilder as type. + */ + public CompositeAggregationSpec( + final Consumer<ValuesSourceRegistry.Builder> aggregatorRegistrar, + final Class<? extends CompositeValuesSourceBuilder<?>> valueSourceBuilderClass, + final Byte byteCode, + final Writeable.Reader<? extends CompositeValuesSourceBuilder<?>> reader, + final CompositeAggregationParsingFunction parsingFunction, + final String aggregationType + ) { + this.aggregatorRegistrar = aggregatorRegistrar; + this.valueSourceBuilderClass = valueSourceBuilderClass; + this.byteCode = byteCode; + this.parsingFunction = parsingFunction; + this.aggregationType = aggregationType; + this.reader = reader; + } + + public Consumer<ValuesSourceRegistry.Builder> getAggregatorRegistrar() { + return aggregatorRegistrar; + } + + public Class<?> getValueSourceBuilderClass() { + return valueSourceBuilderClass; + } + + public Byte getByteCode() { + return byteCode; + } + + public CompositeAggregationParsingFunction getParsingFunction() { + return parsingFunction; + } + + public String getAggregationType() { + return aggregationType; + } + + public Writeable.Reader<? extends CompositeValuesSourceBuilder<?>> getReader() { + return reader; + } + } + /** * Specification for a {@link PipelineAggregator}.
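As an aside: a plugin would wire a custom value source through this new extension point roughly as below. Every name prefixed "MyGrid", the byte code value, and the type string are hypothetical placeholders; only the CompositeAggregationSpec constructor shape comes from the hunk above.

    // Hypothetical plugin code: all "MyGrid" names are placeholders, not real APIs.
    @Override
    public List<CompositeAggregationSpec> getCompositeAggregations() {
        return Collections.singletonList(
            new CompositeAggregationSpec(
                MyGridValuesSourceBuilder::register,   // ValuesSource -> aggregator mappings
                MyGridValuesSourceBuilder.class,       // builder class used for dispatch
                (byte) 5,                              // legacy serialisation byte code (deprecated)
                MyGridValuesSourceBuilder::new,        // StreamInput constructor reference
                MyGridValuesSourceBuilder::parse,      // XContent parsing hook
                "my_grid"                              // aggregation type string
            )
        );
    }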
*/ diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 7e7e4f83334f5..84c46e400543a 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -47,7 +47,7 @@ import org.opensearch.common.time.DateUtils; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.mapper.DateFieldMapper; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import java.io.IOException; import java.math.BigInteger; diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 80e025a3651a8..0149f9a025bcd 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -126,10 +126,6 @@ import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.bucket.filter.InternalFilters; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.global.InternalGlobal; import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -628,22 +624,6 @@ private ValuesSourceRegistry registerAggregations(List plugins) { ).addResultReader(InternalGeoDistance::new).setAggregatorRegistrar(GeoDistanceAggregationBuilder::registerAggregators), builder ); - registerAggregation( - new AggregationSpec( - GeoHashGridAggregationBuilder.NAME, - GeoHashGridAggregationBuilder::new, - GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators), - builder - ); - registerAggregation( - new AggregationSpec( - GeoTileGridAggregationBuilder.NAME, - GeoTileGridAggregationBuilder::new, - GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators), - builder - ); registerAggregation( new AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new, NestedAggregationBuilder::parse) .addResultReader(InternalNested::new), @@ -681,7 +661,7 @@ private ValuesSourceRegistry registerAggregations(List plugins) { registerAggregation( new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, CompositeAggregationBuilder.PARSER) .addResultReader(InternalComposite::new) - .setAggregatorRegistrar(CompositeAggregationBuilder::registerAggregators), + .setAggregatorRegistrar(reg -> CompositeAggregationBuilder.registerAggregators(reg, plugins)), builder ); registerAggregation( diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java index 382455093309d..9886e423bbc76 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilders.java @@ -43,10 +43,6 @@ import org.opensearch.search.aggregations.bucket.filter.Filters; import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.opensearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -261,20 +257,6 @@ public static HistogramAggregationBuilder histogram(String name) { return new HistogramAggregationBuilder(name); } - /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. - */ - public static GeoHashGridAggregationBuilder geohashGrid(String name) { - return new GeoHashGridAggregationBuilder(name); - } - - /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. - */ - public static GeoTileGridAggregationBuilder geotileGrid(String name) { - return new GeoTileGridAggregationBuilder(name); - } - /** * Create a new {@link SignificantTerms} aggregation with the given name. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java similarity index 97% rename from server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtils.java rename to server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java index 5498b2b1a7109..6cd1823622f01 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtils.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.bucket.geogrid; +package org.opensearch.search.aggregations.bucket; import org.apache.lucene.geo.GeoEncodingUtils; import org.apache.lucene.util.SloppyMath; @@ -104,7 +104,7 @@ private GeoTileUtils() {} * @param parser {@link XContentParser} to parse the value from * @return int representing precision */ - static int parsePrecision(XContentParser parser) throws IOException, OpenSearchParseException { + public static int parsePrecision(XContentParser parser) throws IOException, OpenSearchParseException { final Object node = parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER) ? 
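The GeoTileUtils helpers made public above are now usable from outside the package (the geo module depends on this); a small round-trip sketch, where the method signatures come from this diff but the sample coordinates are illustrative:

    import org.opensearch.common.geo.GeoPoint;
    import org.opensearch.search.aggregations.bucket.GeoTileUtils;

    public class GeoTileRoundTrip {
        public static void main(String[] args) {
            // Encode (lon, lat) into a zoom-7 tile hash, then decode back to the tile centre.
            long hash = GeoTileUtils.longEncode(41.610741, 48.934059, 7);
            GeoPoint centre = GeoTileUtils.hashToGeoPoint(hash);
            // Bucket keys use the "zoom/x/y" form seen in the composite tests, e.g. "7/32/56".
            GeoPoint fromKey = GeoTileUtils.keyToGeoPoint(GeoTileUtils.stringEncode(hash));
            System.out.println(centre + " == " + fromKey);
        }
    }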
Integer.valueOf(parser.intValue()) : parser.text(); @@ -252,7 +252,7 @@ public static String stringEncode(long hash) { /** * Decode long hash as a GeoPoint (center of the tile) */ - static GeoPoint hashToGeoPoint(long hash) { + public static GeoPoint hashToGeoPoint(long hash) { int[] res = parseHash(hash); return zxyToGeoPoint(res[0], res[1], res[2]); } @@ -260,7 +260,7 @@ static GeoPoint hashToGeoPoint(long hash) { /** * Decode a string bucket key in "zoom/x/y" format to a GeoPoint (center of the tile) */ - static GeoPoint keyToGeoPoint(String hashAsString) { + public static GeoPoint keyToGeoPoint(String hashAsString) { int[] hashAsInts = parseHash(hashAsString); return zxyToGeoPoint(hashAsInts[0], hashAsInts[1], hashAsInts[2]); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 8b07df3f689bf..093c2ad42722e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -35,9 +35,11 @@ import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.xcontent.ConstructingObjectParser; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.AbstractAggregationBuilder; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; @@ -47,11 +49,14 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -82,14 +87,55 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder p.map(), AFTER_FIELD_NAME); } - public static void registerAggregators(ValuesSourceRegistry.Builder builder) { + static final Map, Byte> BUILDER_CLASS_TO_BYTE_CODE = new HashMap<>(); + static final Map BUILDER_TYPE_TO_PARSER = new HashMap<>(); + static final Map>> BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER = + new HashMap<>(); + static final Map< + String, + Writeable.Reader>> AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER = new HashMap<>(); + static final Map, String> BUILDER_CLASS_TO_AGGREGATION_TYPE = new HashMap<>(); + + public static void registerAggregators(ValuesSourceRegistry.Builder builder, final List plugins) { DateHistogramValuesSourceBuilder.register(builder); HistogramValuesSourceBuilder.register(builder); - GeoTileGridValuesSourceBuilder.register(builder); TermsValuesSourceBuilder.register(builder); + // Register All other aggregations that wants to be part of Composite Aggregation which are provided in + // Plugins along with their parsers and serialisation codes + registerCompositeAggregatorsPlugins(plugins, SearchPlugin::getCompositeAggregations, (compositeAggregationSpec) -> { + compositeAggregationSpec.getAggregatorRegistrar().accept(builder); + 
BUILDER_TYPE_TO_PARSER.put(compositeAggregationSpec.getAggregationType(), compositeAggregationSpec.getParsingFunction()); + // This is added for backward compatibility, so that we can move away from byte code in the serialisation + if (compositeAggregationSpec.getByteCode() != null) { + BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER.put( + (int) compositeAggregationSpec.getByteCode(), + compositeAggregationSpec.getReader() + ); + BUILDER_CLASS_TO_BYTE_CODE.put( + compositeAggregationSpec.getValueSourceBuilderClass(), + compositeAggregationSpec.getByteCode() + ); + } + AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER.put( + compositeAggregationSpec.getAggregationType(), + compositeAggregationSpec.getReader() + ); + BUILDER_CLASS_TO_AGGREGATION_TYPE.put( + compositeAggregationSpec.getValueSourceBuilderClass(), + compositeAggregationSpec.getAggregationType() + ); + }); builder.registerUsage(NAME); } + private static void registerCompositeAggregatorsPlugins( + final List plugins, + final Function> producer, + final Consumer consumer + ) { + plugins.forEach(searchPlugin -> producer.apply(searchPlugin).forEach(consumer)); + } + private List> sources; private Map after; private int size = 10; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java new file mode 100644 index 0000000000000..344563ad20309 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationParsingFunction.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket.composite; + +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * A functional interface which encapsulates the parsing function to be called for the aggregation which is + * also registered as CompositeAggregation. + */ +@FunctionalInterface +public interface CompositeAggregationParsingFunction { + CompositeValuesSourceBuilder parse(final String name, final XContentParser parser) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 7764d367a0cec..26015ae04cf76 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -69,11 +69,11 @@ public abstract class CompositeValuesSourceBuilder createValuesSource( * @param missingBucket If true an explicit null bucket will represent documents with missing values. * @param hasScript true if the source contains a script that can change the value. */ - CompositeValuesSourceConfig( + public CompositeValuesSourceConfig( String name, @Nullable MappedFieldType fieldType, ValuesSource vs, @@ -113,21 +113,21 @@ SingleDimensionValuesSource createValuesSource( /** * Returns the name associated with this configuration. */ - String name() { + protected String name() { return name; } /** * Returns the {@link MappedFieldType} for this config. 
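The new @FunctionalInterface is usually satisfied with a plain method reference, since the built-in sources already expose static parse(name, parser) methods (the helper below wires HistogramValuesSourceBuilder.parse this way). A sketch with a hypothetical builder:

    import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationParsingFunction;

    public class ParsingFunctionSketch {
        // MyValuesSourceBuilder is hypothetical; its static parse(name, parser) would
        // typically delegate to an ObjectParser configured through the now-public
        // CompositeValuesSourceParserHelper.declareValuesSourceFields(...).
        static final CompositeAggregationParsingFunction MY_SOURCE_PARSER = MyValuesSourceBuilder::parse;
    }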
*/ - MappedFieldType fieldType() { + public MappedFieldType fieldType() { return fieldType; } /** * Returns the {@link ValuesSource} for this configuration. */ - ValuesSource valuesSource() { + public ValuesSource valuesSource() { return vs; } @@ -135,35 +135,35 @@ ValuesSource valuesSource() { * The {@link DocValueFormat} to use for formatting the keys. * {@link DocValueFormat#RAW} means no formatting. */ - DocValueFormat format() { + public DocValueFormat format() { return format; } /** * If true, an explicit `null bucket represents documents with missing values. */ - boolean missingBucket() { + public boolean missingBucket() { return missingBucket; } /** * Return the {@link MissingOrder} for the config. */ - MissingOrder missingOrder() { + public MissingOrder missingOrder() { return missingOrder; } /** * Returns true if the source contains a script that can change the value. */ - boolean hasScript() { + protected boolean hasScript() { return hasScript; } /** * The sort order for the values source (e.g. -1 for descending and 1 for ascending). */ - int reverseMul() { + public int reverseMul() { assert reverseMul == -1 || reverseMul == 1; return reverseMul; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index 60d7f277f7650..d8526e684f391 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -49,6 +49,11 @@ import java.io.IOException; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_CLASS_TO_AGGREGATION_TYPE; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_CLASS_TO_BYTE_CODE; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_TYPE_TO_PARSER; +import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER; /** * Helper class for obtaining values source parsers for different aggs @@ -57,7 +62,11 @@ */ public class CompositeValuesSourceParserHelper { - static , T> void declareValuesSourceFields(AbstractObjectParser objectParser) { + private static final int AGGREGATION_TYPE_REFERENCE = Byte.MAX_VALUE; + + public static , T> void declareValuesSourceFields( + AbstractObjectParser objectParser + ) { objectParser.declareField(VB::field, XContentParser::text, new ParseField("field"), ObjectParser.ValueType.STRING); objectParser.declareBoolean(VB::missingBucket, new ParseField("missing_bucket")); objectParser.declareString(VB::missingOrder, new ParseField(MissingOrder.NAME)); @@ -78,28 +87,45 @@ static , T> void declareValuesSource } public static void writeTo(CompositeValuesSourceBuilder builder, StreamOutput out) throws IOException { - final byte code; + int code = Byte.MIN_VALUE; + String aggregationType = null; if (builder.getClass() == TermsValuesSourceBuilder.class) { code = 0; } else if (builder.getClass() == DateHistogramValuesSourceBuilder.class) { code 
= 1; } else if (builder.getClass() == HistogramValuesSourceBuilder.class) { code = 2; - } else if (builder.getClass() == GeoTileGridValuesSourceBuilder.class) { - if (out.getVersion().before(LegacyESVersion.V_7_5_0)) { - throw new IOException( - "Attempting to serialize [" - + builder.getClass().getSimpleName() - + "] to a node with unsupported version [" - + out.getVersion() - + "]" - ); - } - code = 3; } else { - throw new IOException("invalid builder type: " + builder.getClass().getSimpleName()); + if (!BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass()) + && !BUILDER_CLASS_TO_AGGREGATION_TYPE.containsKey(builder.getClass())) { + throw new IOException("invalid builder type: " + builder.getClass().getSimpleName()); + } + aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass()); + if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { + code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass()); + if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) { + throw new IOException( + "Attempting to serialize [" + + builder.getClass().getSimpleName() + + "] to a node with unsupported version [" + + out.getVersion() + + "]" + ); + } + } + } + + if (code != Byte.MIN_VALUE) { + out.writeByte((byte) code); + } else if (!BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { + /* + * This is added for backward compatibility when 1 data node is using the new code which is using the + * aggregation type and another is using the only byte code in the serialisation. + */ + out.writeByte((byte) AGGREGATION_TYPE_REFERENCE); + assert aggregationType != null; + out.writeString(aggregationType); } - out.writeByte(code); builder.writeTo(out); } @@ -112,10 +138,17 @@ public static CompositeValuesSourceBuilder readFrom(StreamInput in) throws IO return new DateHistogramValuesSourceBuilder(in); case 2: return new HistogramValuesSourceBuilder(in); - case 3: - return new GeoTileGridValuesSourceBuilder(in); + case AGGREGATION_TYPE_REFERENCE: + final String aggregationType = in.readString(); + if (!AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER.containsKey(aggregationType)) { + throw new IOException("Invalid aggregation type " + aggregationType); + } + return (CompositeValuesSourceBuilder) AGGREGATION_TYPE_TO_COMPOSITE_VALUE_SOURCE_READER.get(aggregationType).read(in); default: - throw new IOException("Invalid code " + code); + if (!BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER.containsKey(code)) { + throw new IOException("Invalid code " + code); + } + return (CompositeValuesSourceBuilder) BYTE_CODE_TO_COMPOSITE_VALUE_SOURCE_READER.get(code).read(in); } } @@ -143,11 +176,11 @@ public static CompositeValuesSourceBuilder fromXContent(XContentParser parser case HistogramValuesSourceBuilder.TYPE: builder = HistogramValuesSourceBuilder.parse(name, parser); break; - case GeoTileGridValuesSourceBuilder.TYPE: - builder = GeoTileGridValuesSourceBuilder.parse(name, parser); - break; default: - throw new ParsingException(parser.getTokenLocation(), "invalid source type: " + type); + if (!BUILDER_TYPE_TO_PARSER.containsKey(type)) { + throw new ParsingException(parser.getTokenLocation(), "invalid source type: " + type); + } + builder = BUILDER_TYPE_TO_PARSER.get(type).parse(name, parser); } parser.nextToken(); parser.nextToken(); @@ -163,4 +196,5 @@ public static XContentBuilder toXContent(CompositeValuesSourceBuilder source, builder.endObject(); return builder; } + } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java 
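In short, the writeTo/readFrom changes above define a two-track wire format: sources with a registered byte code keep the old single-byte identifier, everything else is written as the Byte.MAX_VALUE sentinel followed by the aggregation type string. A simplified restatement, assuming same-package access to the package-private lookup maps and omitting the hardcoded 0/1/2 built-in branches and the legacy-version guard; it is not the verbatim production method:

    import java.io.IOException;
    import org.opensearch.common.io.stream.StreamOutput;

    import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_CLASS_TO_AGGREGATION_TYPE;
    import static org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder.BUILDER_CLASS_TO_BYTE_CODE;

    public final class SourceIdWireFormatSketch {
        private static final byte AGGREGATION_TYPE_REFERENCE = Byte.MAX_VALUE; // 127, as above

        public static void writeSourceId(CompositeValuesSourceBuilder<?> builder, StreamOutput out) throws IOException {
            Byte legacyCode = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass());
            if (legacyCode != null) {
                out.writeByte(legacyCode);                 // track 1: old nodes can still decode this
                return;
            }
            String aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass());
            if (aggregationType == null) {
                throw new IOException("invalid builder type: " + builder.getClass().getSimpleName());
            }
            out.writeByte(AGGREGATION_TYPE_REFERENCE);     // track 2: sentinel byte...
            out.writeString(aggregationType);              // ...followed by the type string
        }
    }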
b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java index a7ed50507288d..ec6410c2a9377 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -66,7 +66,7 @@ * * @opensearch.internal */ -class LongValuesSource extends SingleDimensionValuesSource { +public class LongValuesSource extends SingleDimensionValuesSource { private final BigArrays bigArrays; private final CheckedFunction docValuesFunc; private final LongUnaryOperator rounding; @@ -76,7 +76,7 @@ class LongValuesSource extends SingleDimensionValuesSource { private long currentValue; private boolean missingCurrentValue; - LongValuesSource( + public LongValuesSource( BigArrays bigArrays, MappedFieldType fieldType, CheckedFunction docValuesFunc, @@ -165,7 +165,7 @@ private int compareValues(long v1, long v2) { } @Override - void setAfter(Comparable value) { + protected void setAfter(Comparable value) { if (missingBucket && value == null) { afterValue = null; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index 747a7017ec872..fe0801d6d230e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -53,7 +53,7 @@ * * @opensearch.internal */ -abstract class SingleDimensionValuesSource> implements Releasable { +public abstract class SingleDimensionValuesSource> implements Releasable { protected final BigArrays bigArrays; protected final DocValueFormat format; @Nullable diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index f36c4620d5b33..b4da1d10b4b68 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -35,7 +35,6 @@ import org.opensearch.search.aggregations.bucket.composite.InternalComposite; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.bucket.filter.InternalFilters; -import org.opensearch.search.aggregations.bucket.geogrid.InternalGeoGrid; import org.opensearch.search.aggregations.bucket.global.InternalGlobal; import org.opensearch.search.aggregations.bucket.histogram.InternalVariableWidthHistogram; import org.opensearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; @@ -119,10 +118,6 @@ public static boolean hasValue(InternalFilter agg) { return agg.getDocCount() > 0; } - public static boolean hasValue(InternalGeoGrid agg) { - return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); - } - public static boolean hasValue(InternalGlobal agg) { return agg.getDocCount() > 0; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java b/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java index e16e8c91b3fd0..dd2c16f1daa0e 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/package-info.java @@ -43,7 +43,7 @@ * output). A class hierarchy defines the type of values returned by the source. The top level sub-classes define type-specific behavior, * such as {@link org.opensearch.search.aggregations.support.ValuesSource.Numeric#isFloatingPoint()}. Second level subclasses are * then specialized based on where they read values from, e.g. script or field cases. There are also adapter classes like - * {@link org.opensearch.search.aggregations.bucket.geogrid.CellIdSource} which do run-time conversion from one type to another, often + * org.opensearch.search.aggregations.bucket.geogrid.CellIdSource which do run-time conversion from one type to another, often * dependent on a user specified parameter (precision in that case). *

* diff --git a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java index 36a6eb3ae87b0..bd0fbfe69960c 100644 --- a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java @@ -48,7 +48,7 @@ import java.util.ArrayList; import java.util.List; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.longEncode; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.longEncode; public class DocValueFormatTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index 111ce23f8a0cb..94fb6cded637d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -48,8 +48,6 @@ import org.opensearch.search.aggregations.bucket.composite.InternalCompositeTests; import org.opensearch.search.aggregations.bucket.filter.InternalFilterTests; import org.opensearch.search.aggregations.bucket.filter.InternalFiltersTests; -import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridTests; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridTests; import org.opensearch.search.aggregations.bucket.global.InternalGlobalTests; import org.opensearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; @@ -157,8 +155,6 @@ private static List> getAggsTests() { aggsTests.add(new InternalGlobalTests()); aggsTests.add(new InternalFilterTests()); aggsTests.add(new InternalSamplerTests()); - aggsTests.add(new GeoHashGridTests()); - aggsTests.add(new GeoTileGridTests()); aggsTests.add(new InternalRangeTests()); aggsTests.add(new InternalDateRangeTests()); aggsTests.add(new InternalGeoDistanceTests()); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index c4a87f3993bb4..9290183ec7312 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -32,10 +32,8 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.common.geo.GeoBoundingBoxTests; import org.opensearch.script.Script; import org.opensearch.search.aggregations.BaseAggregationTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.sort.SortOrder; @@ -74,17 +72,6 @@ private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { return histo; } - private GeoTileGridValuesSourceBuilder randomGeoTileGridValuesSourceBuilder() { - GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); - if (randomBoolean()) { - geoTile.precision(randomIntBetween(0, GeoTileUtils.MAX_ZOOM)); - } - if (randomBoolean()) { - 
geoTile.geoBoundingBox(GeoBoundingBoxTests.randomBBox()); - } - return geoTile; - } - private TermsValuesSourceBuilder randomTermsSourceBuilder() { TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); if (randomBoolean()) { @@ -118,11 +105,9 @@ private HistogramValuesSourceBuilder randomHistogramSourceBuilder() { @Override protected CompositeAggregationBuilder createTestAggregatorBuilder() { int numSources = randomIntBetween(1, 10); - numSources = 1; List> sources = new ArrayList<>(); for (int i = 0; i < numSources; i++) { - int type = randomIntBetween(0, 3); - type = 3; + int type = randomIntBetween(0, 2); switch (type) { case 0: sources.add(randomTermsSourceBuilder()); @@ -133,9 +118,6 @@ protected CompositeAggregationBuilder createTestAggregatorBuilder() { case 2: sources.add(randomHistogramSourceBuilder()); break; - case 3: - sources.add(randomGeoTileGridValuesSourceBuilder()); - break; default: throw new AssertionError("wrong branch"); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 88b2323b8adfc..25003e0b84567 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -32,68 +32,24 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.apache.lucene.tests.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.DoublePoint; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.document.IntPoint; -import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.SortedSetDocValuesField; -import org.apache.lucene.document.StringField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.tests.util.TestUtil; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.text.Text; -import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.time.DateFormatters; -import org.opensearch.index.Index; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.DateFieldMapper; -import org.opensearch.index.mapper.DocumentMapper; -import org.opensearch.index.mapper.GeoPointFieldMapper; -import org.opensearch.index.mapper.IpFieldMapper; -import 
org.opensearch.index.mapper.KeywordFieldMapper; -import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.aggregations.Aggregator; -import org.opensearch.search.aggregations.AggregatorTestCase; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.composite.BaseCompositeAggregatorTestCase; import org.opensearch.search.aggregations.metrics.InternalMax; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; import org.opensearch.search.aggregations.metrics.TopHits; import org.opensearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.IndexSettingsModule; -import org.junit.After; -import org.junit.Before; import java.io.IOException; import java.net.InetAddress; @@ -109,51 +65,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class CompositeAggregatorTests extends AggregatorTestCase { - private static MappedFieldType[] FIELD_TYPES; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - FIELD_TYPES = new MappedFieldType[8]; - FIELD_TYPES[0] = new KeywordFieldMapper.KeywordFieldType("keyword"); - FIELD_TYPES[1] = new NumberFieldMapper.NumberFieldType("long", NumberFieldMapper.NumberType.LONG); - FIELD_TYPES[2] = new NumberFieldMapper.NumberFieldType("double", NumberFieldMapper.NumberType.DOUBLE); - FIELD_TYPES[3] = new DateFieldMapper.DateFieldType("date", DateFormatter.forPattern("yyyy-MM-dd||epoch_millis")); - FIELD_TYPES[4] = new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.INTEGER); - FIELD_TYPES[5] = new KeywordFieldMapper.KeywordFieldType("terms"); - FIELD_TYPES[6] = new IpFieldMapper.IpFieldType("ip"); - FIELD_TYPES[7] = new GeoPointFieldMapper.GeoPointFieldType("geo_point"); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - FIELD_TYPES = null; - } - @Override - protected MapperService mapperServiceMock() { - MapperService mapperService = mock(MapperService.class); - DocumentMapper mapper = mock(DocumentMapper.class); - when(mapper.typeText()).thenReturn(new Text("_doc")); - when(mapper.type()).thenReturn("_doc"); - when(mapperService.documentMapper()).thenReturn(mapper); - return mapperService; - } +public class CompositeAggregatorTests extends BaseCompositeAggregatorTestCase { public void testUnmappedFieldWithTerms() throws Exception { final List>> dataset = new ArrayList<>(); @@ -234,80 +153,6 @@ 
public void testUnmappedFieldWithTerms() throws Exception { ); } - public void testUnmappedFieldWithGeopoint() throws Exception { - final List>> dataset = new ArrayList<>(); - final String mappedFieldName = "geo_point"; - dataset.addAll( - Arrays.asList( - createDocument(mappedFieldName, new GeoPoint(48.934059, 41.610741)), - createDocument(mappedFieldName, new GeoPoint(-23.065941, 113.610741)), - createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)), - createDocument(mappedFieldName, new GeoPoint(37.2343, -115.8067)), - createDocument(mappedFieldName, new GeoPoint(90.0, 0.0)) - ) - ); - - // just unmapped = no results - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder("name", Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped"))), - (result) -> assertEquals(0, result.getBuckets().size()) - ); - - // unmapped missing bucket = one result - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder( - "name", - Arrays.asList(new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true)) - ), - (result) -> { - assertEquals(1, result.getBuckets().size()); - assertEquals("{unmapped=null}", result.afterKey().toString()); - assertEquals("{unmapped=null}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(5L, result.getBuckets().get(0).getDocCount()); - } - ); - - // field + unmapped, no missing bucket = no results - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder( - "name", - Arrays.asList( - new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), - new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped") - ) - ), - (result) -> assertEquals(0, result.getBuckets().size()) - ); - - // field + unmapped with missing bucket = multiple results - testSearchCase( - Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery(mappedFieldName)), - dataset, - () -> new CompositeAggregationBuilder( - "name", - Arrays.asList( - new GeoTileGridValuesSourceBuilder(mappedFieldName).field(mappedFieldName), - new GeoTileGridValuesSourceBuilder("unmapped").field("unmapped").missingBucket(true) - ) - ), - (result) -> { - assertEquals(2, result.getBuckets().size()); - assertEquals("{geo_point=7/64/56, unmapped=null}", result.afterKey().toString()); - assertEquals("{geo_point=7/32/56, unmapped=null}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("{geo_point=7/64/56, unmapped=null}", result.getBuckets().get(1).getKeyAsString()); - assertEquals(3L, result.getBuckets().get(1).getDocCount()); - } - ); - - } - public void testUnmappedFieldWithHistogram() throws Exception { final List>> dataset = new ArrayList<>(); final String mappedFieldName = "price"; @@ -2483,42 +2328,6 @@ public void testWithIP() throws Exception { }); } - public void testWithGeoPoint() throws Exception { - final List>> dataset = new ArrayList<>(); - dataset.addAll( - Arrays.asList( - createDocument("geo_point", new GeoPoint(48.934059, 41.610741)), - createDocument("geo_point", new GeoPoint(-23.065941, 113.610741)), - createDocument("geo_point", new GeoPoint(90.0, 0.0)), - createDocument("geo_point", new GeoPoint(37.2343, -115.8067)), - createDocument("geo_point", new 
GeoPoint(90.0, 0.0)) - ) - ); - testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { - GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); - return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)); - }, (result) -> { - assertEquals(2, result.getBuckets().size()); - assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); - assertEquals("{geo_point=7/32/56}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("{geo_point=7/64/56}", result.getBuckets().get(1).getKeyAsString()); - assertEquals(3L, result.getBuckets().get(1).getDocCount()); - }); - - testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("geo_point")), dataset, () -> { - GeoTileGridValuesSourceBuilder geoTile = new GeoTileGridValuesSourceBuilder("geo_point").field("geo_point"); - return new CompositeAggregationBuilder("name", Collections.singletonList(geoTile)).aggregateAfter( - Collections.singletonMap("geo_point", "7/32/56") - ); - }, (result) -> { - assertEquals(1, result.getBuckets().size()); - assertEquals("{geo_point=7/64/56}", result.afterKey().toString()); - assertEquals("{geo_point=7/64/56}", result.getBuckets().get(0).getKeyAsString()); - assertEquals(3L, result.getBuckets().get(0).getDocCount()); - }); - } - public void testEarlyTermination() throws Exception { final List>> dataset = new ArrayList<>(); dataset.addAll( @@ -2648,193 +2457,4 @@ public void testIndexSortWithDuplicate() throws Exception { ); } } - - private void testSearchCase( - List queries, - List>> dataset, - Supplier create, - Consumer verify - ) throws IOException { - for (Query query : queries) { - executeTestCase(false, false, query, dataset, create, verify); - executeTestCase(false, true, query, dataset, create, verify); - } - } - - private void executeTestCase( - boolean forceMerge, - boolean useIndexSort, - Query query, - List>> dataset, - Supplier create, - Consumer verify - ) throws IOException { - Map types = Arrays.stream(FIELD_TYPES) - .collect(Collectors.toMap(MappedFieldType::name, Function.identity())); - CompositeAggregationBuilder aggregationBuilder = create.get(); - Sort indexSort = useIndexSort ? buildIndexSort(aggregationBuilder.sources(), types) : null; - IndexSettings indexSettings = createIndexSettings(indexSort); - try (Directory directory = newDirectory()) { - IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); - if (indexSort != null) { - config.setIndexSort(indexSort); - config.setCodec(TestUtil.getDefaultCodec()); - } - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) { - Document document = new Document(); - int id = 0; - for (Map> fields : dataset) { - document.clear(); - addToDocument(id, document, fields); - indexWriter.addDocument(document); - id++; - } - if (forceMerge || rarely()) { - // forceMerge randomly or if the collector-per-leaf testing stuff would break the tests. 
- indexWriter.forceMerge(1); - } else { - if (dataset.size() > 0) { - int numDeletes = randomIntBetween(1, 25); - for (int i = 0; i < numDeletes; i++) { - id = randomIntBetween(0, dataset.size() - 1); - indexWriter.deleteDocuments(new Term("id", Integer.toString(id))); - document.clear(); - addToDocument(id, document, dataset.get(id)); - indexWriter.addDocument(document); - } - } - - } - } - try (IndexReader indexReader = DirectoryReader.open(directory)) { - IndexSearcher indexSearcher = new IndexSearcher(indexReader); - InternalComposite composite = searchAndReduce(indexSettings, indexSearcher, query, aggregationBuilder, FIELD_TYPES); - verify.accept(composite); - } - } - } - - private static IndexSettings createIndexSettings(Sort sort) { - Settings.Builder builder = Settings.builder(); - if (sort != null) { - String[] fields = Arrays.stream(sort.getSort()).map(SortField::getField).toArray(String[]::new); - String[] orders = Arrays.stream(sort.getSort()).map((o) -> o.getReverse() ? "desc" : "asc").toArray(String[]::new); - builder.putList("index.sort.field", fields); - builder.putList("index.sort.order", orders); - } - return IndexSettingsModule.newIndexSettings(new Index("_index", "0"), builder.build()); - } - - private void addToDocument(int id, Document doc, Map> keys) { - doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); - for (Map.Entry> entry : keys.entrySet()) { - final String name = entry.getKey(); - for (Object value : entry.getValue()) { - if (value instanceof Integer) { - doc.add(new SortedNumericDocValuesField(name, (int) value)); - doc.add(new IntPoint(name, (int) value)); - } else if (value instanceof Long) { - doc.add(new SortedNumericDocValuesField(name, (long) value)); - doc.add(new LongPoint(name, (long) value)); - } else if (value instanceof Double) { - doc.add(new SortedNumericDocValuesField(name, NumericUtils.doubleToSortableLong((double) value))); - doc.add(new DoublePoint(name, (double) value)); - } else if (value instanceof String) { - doc.add(new SortedSetDocValuesField(name, new BytesRef((String) value))); - doc.add(new StringField(name, new BytesRef((String) value), Field.Store.NO)); - } else if (value instanceof InetAddress) { - doc.add(new SortedSetDocValuesField(name, new BytesRef(InetAddressPoint.encode((InetAddress) value)))); - doc.add(new InetAddressPoint(name, (InetAddress) value)); - } else if (value instanceof GeoPoint) { - GeoPoint point = (GeoPoint) value; - doc.add( - new SortedNumericDocValuesField( - name, - GeoTileUtils.longEncode(point.lon(), point.lat(), GeoTileGridAggregationBuilder.DEFAULT_PRECISION) - ) - ); - doc.add(new LatLonPoint(name, point.lat(), point.lon())); - } else { - throw new AssertionError("invalid object: " + value.getClass().getSimpleName()); - } - } - } - } - - private static Map createAfterKey(Object... fields) { - assert fields.length % 2 == 0; - final Map map = new HashMap<>(); - for (int i = 0; i < fields.length; i += 2) { - String field = (String) fields[i]; - map.put(field, fields[i + 1]); - } - return map; - } - - @SuppressWarnings("unchecked") - private static Map> createDocument(Object... 
fields) { - assert fields.length % 2 == 0; - final Map> map = new HashMap<>(); - for (int i = 0; i < fields.length; i += 2) { - String field = (String) fields[i]; - if (fields[i + 1] instanceof List) { - map.put(field, (List) fields[i + 1]); - } else { - map.put(field, Collections.singletonList(fields[i + 1])); - } - } - return map; - } - - private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); - } - - private static Sort buildIndexSort(List> sources, Map fieldTypes) { - List sortFields = new ArrayList<>(); - Map remainingFieldTypes = new HashMap<>(fieldTypes); - for (CompositeValuesSourceBuilder source : sources) { - MappedFieldType type = fieldTypes.remove(source.field()); - remainingFieldTypes.remove(source.field()); - SortField sortField = sortFieldFrom(type); - if (sortField == null) { - break; - } - sortFields.add(sortField); - } - while (remainingFieldTypes.size() > 0 && randomBoolean()) { - // Add extra unused sorts - List fields = new ArrayList<>(remainingFieldTypes.keySet()); - Collections.sort(fields); - String field = fields.get(between(0, fields.size() - 1)); - SortField sortField = sortFieldFrom(remainingFieldTypes.remove(field)); - if (sortField != null) { - sortFields.add(sortField); - } - } - return sortFields.size() > 0 ? new Sort(sortFields.toArray(new SortField[0])) : null; - } - - private static SortField sortFieldFrom(MappedFieldType type) { - if (type instanceof KeywordFieldMapper.KeywordFieldType) { - return new SortedSetSortField(type.name(), false); - } else if (type instanceof DateFieldMapper.DateFieldType) { - return new SortedNumericSortField(type.name(), SortField.Type.LONG, false); - } else if (type instanceof NumberFieldMapper.NumberFieldType) { - switch (type.typeName()) { - case "byte": - case "short": - case "integer": - return new SortedNumericSortField(type.name(), SortField.Type.INT, false); - case "long": - return new SortedNumericSortField(type.name(), SortField.Type.LONG, false); - case "float": - case "double": - return new SortedNumericSortField(type.name(), SortField.Type.DOUBLE, false); - default: - return null; - } - } - return null; - } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java index dfe4034650594..1443208a1d2fc 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java @@ -34,14 +34,15 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.geometry.Rectangle; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.MAX_ZOOM; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.checkPrecisionRange; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.hashToGeoPoint; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.keyToGeoPoint; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.longEncode; -import static org.opensearch.search.aggregations.bucket.geogrid.GeoTileUtils.stringEncode; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.MAX_ZOOM; +import static 
org.opensearch.search.aggregations.bucket.GeoTileUtils.checkPrecisionRange; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.hashToGeoPoint; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.keyToGeoPoint; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.longEncode; +import static org.opensearch.search.aggregations.bucket.GeoTileUtils.stringEncode; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java new file mode 100644 index 0000000000000..7d00772913d6e --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -0,0 +1,310 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.composite; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.junit.After; +import org.junit.Before; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.text.Text; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.time.DateFormatters; +import org.opensearch.index.Index; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.IpFieldMapper; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.composite.InternalComposite; +import org.opensearch.test.IndexSettingsModule; + +import 
java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Base class for the Aggregator Tests which are registered under Composite Aggregation. + */ +public class BaseCompositeAggregatorTestCase extends AggregatorTestCase { + + protected static List FIELD_TYPES; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + FIELD_TYPES = new ArrayList<>(); + FIELD_TYPES.add(new KeywordFieldMapper.KeywordFieldType("keyword")); + FIELD_TYPES.add(new NumberFieldMapper.NumberFieldType("long", NumberFieldMapper.NumberType.LONG)); + FIELD_TYPES.add(new NumberFieldMapper.NumberFieldType("double", NumberFieldMapper.NumberType.DOUBLE)); + FIELD_TYPES.add(new DateFieldMapper.DateFieldType("date", DateFormatter.forPattern("yyyy-MM-dd||epoch_millis"))); + FIELD_TYPES.add(new NumberFieldMapper.NumberFieldType("price", NumberFieldMapper.NumberType.INTEGER)); + FIELD_TYPES.add(new KeywordFieldMapper.KeywordFieldType("terms")); + FIELD_TYPES.add(new IpFieldMapper.IpFieldType("ip")); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + FIELD_TYPES = null; + } + + @Override + protected MapperService mapperServiceMock() { + MapperService mapperService = mock(MapperService.class); + DocumentMapper mapper = mock(DocumentMapper.class); + when(mapper.typeText()).thenReturn(new Text("_doc")); + when(mapper.type()).thenReturn("_doc"); + when(mapperService.documentMapper()).thenReturn(mapper); + return mapperService; + } + + protected static Map> createDocument(Object... fields) { + assert fields.length % 2 == 0; + final Map> map = new HashMap<>(); + for (int i = 0; i < fields.length; i += 2) { + String field = (String) fields[i]; + if (fields[i + 1] instanceof List) { + map.put(field, (List) fields[i + 1]); + } else { + map.put(field, Collections.singletonList(fields[i + 1])); + } + } + return map; + } + + protected void testSearchCase( + List queries, + List>> dataset, + Supplier create, + Consumer verify + ) throws IOException { + for (Query query : queries) { + executeTestCase(false, false, query, dataset, create, verify); + executeTestCase(false, true, query, dataset, create, verify); + } + } + + protected void executeTestCase( + boolean forceMerge, + boolean useIndexSort, + Query query, + List>> dataset, + Supplier create, + Consumer verify + ) throws IOException { + Map types = FIELD_TYPES.stream().collect(Collectors.toMap(MappedFieldType::name, Function.identity())); + CompositeAggregationBuilder aggregationBuilder = create.get(); + Sort indexSort = useIndexSort ? 
buildIndexSort(aggregationBuilder.sources(), types) : null; + IndexSettings indexSettings = createIndexSettings(indexSort); + try (Directory directory = newDirectory()) { + IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); + if (indexSort != null) { + config.setIndexSort(indexSort); + config.setCodec(TestUtil.getDefaultCodec()); + } + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) { + Document document = new Document(); + int id = 0; + for (Map> fields : dataset) { + document.clear(); + addToDocument(id, document, fields); + indexWriter.addDocument(document); + id++; + } + if (forceMerge || rarely()) { + // forceMerge randomly or if the collector-per-leaf testing stuff would break the tests. + indexWriter.forceMerge(1); + } else { + if (dataset.size() > 0) { + int numDeletes = randomIntBetween(1, 25); + for (int i = 0; i < numDeletes; i++) { + id = randomIntBetween(0, dataset.size() - 1); + indexWriter.deleteDocuments(new Term("id", Integer.toString(id))); + document.clear(); + addToDocument(id, document, dataset.get(id)); + indexWriter.addDocument(document); + } + } + + } + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = new IndexSearcher(indexReader); + InternalComposite composite = searchAndReduce( + indexSettings, + indexSearcher, + query, + aggregationBuilder, + FIELD_TYPES.toArray(new MappedFieldType[0]) + ); + verify.accept(composite); + } + } + } + + protected void addToDocument(int id, Document doc, Map> keys) { + doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); + for (Map.Entry> entry : keys.entrySet()) { + final String name = entry.getKey(); + for (Object value : entry.getValue()) { + if (value instanceof Integer) { + doc.add(new SortedNumericDocValuesField(name, (int) value)); + doc.add(new IntPoint(name, (int) value)); + } else if (value instanceof Long) { + doc.add(new SortedNumericDocValuesField(name, (long) value)); + doc.add(new LongPoint(name, (long) value)); + } else if (value instanceof Double) { + doc.add(new SortedNumericDocValuesField(name, NumericUtils.doubleToSortableLong((double) value))); + doc.add(new DoublePoint(name, (double) value)); + } else if (value instanceof String) { + doc.add(new SortedSetDocValuesField(name, new BytesRef((String) value))); + doc.add(new StringField(name, new BytesRef((String) value), Field.Store.NO)); + } else if (value instanceof InetAddress) { + doc.add(new SortedSetDocValuesField(name, new BytesRef(InetAddressPoint.encode((InetAddress) value)))); + doc.add(new InetAddressPoint(name, (InetAddress) value)); + } else { + if (!addValueToDocument(doc, name, value)) throw new AssertionError( + "invalid object: " + value.getClass().getSimpleName() + ); + } + } + } + } + + /** + * Override this function to handle any specific type of value you want to add in the document for doing the + * composite aggregation. If you have added another Composite Aggregation Type then you must override this + * function so that your field value can be added in the document correctly. 
+ * + * @param doc {@link Document} + * @param name {@link String} Field Name + * @param value {@link Object} Field value + * @return boolean true or false, based on if value is added or not + */ + protected boolean addValueToDocument(final Document doc, final String name, final Object value) { + return false; + } + + protected static Sort buildIndexSort(List> sources, Map fieldTypes) { + List sortFields = new ArrayList<>(); + Map remainingFieldTypes = new HashMap<>(fieldTypes); + for (CompositeValuesSourceBuilder source : sources) { + MappedFieldType type = fieldTypes.remove(source.field()); + remainingFieldTypes.remove(source.field()); + SortField sortField = sortFieldFrom(type); + if (sortField == null) { + break; + } + sortFields.add(sortField); + } + while (remainingFieldTypes.size() > 0 && randomBoolean()) { + // Add extra unused sorts + List fields = new ArrayList<>(remainingFieldTypes.keySet()); + Collections.sort(fields); + String field = fields.get(between(0, fields.size() - 1)); + SortField sortField = sortFieldFrom(remainingFieldTypes.remove(field)); + if (sortField != null) { + sortFields.add(sortField); + } + } + return sortFields.size() > 0 ? new Sort(sortFields.toArray(new SortField[0])) : null; + } + + protected static SortField sortFieldFrom(MappedFieldType type) { + if (type instanceof KeywordFieldMapper.KeywordFieldType) { + return new SortedSetSortField(type.name(), false); + } else if (type instanceof DateFieldMapper.DateFieldType) { + return new SortedNumericSortField(type.name(), SortField.Type.LONG, false); + } else if (type instanceof NumberFieldMapper.NumberFieldType) { + switch (type.typeName()) { + case "byte": + case "short": + case "integer": + return new SortedNumericSortField(type.name(), SortField.Type.INT, false); + case "long": + return new SortedNumericSortField(type.name(), SortField.Type.LONG, false); + case "float": + case "double": + return new SortedNumericSortField(type.name(), SortField.Type.DOUBLE, false); + default: + return null; + } + } + return null; + } + + protected static IndexSettings createIndexSettings(Sort sort) { + Settings.Builder builder = Settings.builder(); + if (sort != null) { + String[] fields = Arrays.stream(sort.getSort()).map(SortField::getField).toArray(String[]::new); + String[] orders = Arrays.stream(sort.getSort()).map((o) -> o.getReverse() ? "desc" : "asc").toArray(String[]::new); + builder.putList("index.sort.field", fields); + builder.putList("index.sort.order", orders); + } + return IndexSettingsModule.newIndexSettings(new Index("_index", "0"), builder.build()); + } + + protected static Map createAfterKey(Object... 
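The addValueToDocument hook above is the seam that lets the geo module keep its composite tests after this split; a sketch of such an override, mirroring the GeoPoint indexing branch deleted from the core test earlier in this patch (the class name is illustrative, and 7 stands in for GeoTileGridAggregationBuilder.DEFAULT_PRECISION, which now lives in the geo module):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LatLonPoint;
    import org.apache.lucene.document.SortedNumericDocValuesField;
    import org.opensearch.common.geo.GeoPoint;
    import org.opensearch.search.aggregations.bucket.GeoTileUtils;
    import org.opensearch.search.aggregations.composite.BaseCompositeAggregatorTestCase;

    public class GeoCompositeAggregatorTests extends BaseCompositeAggregatorTestCase {
        @Override
        protected boolean addValueToDocument(final Document doc, final String name, final Object value) {
            if (value instanceof GeoPoint == false) {
                return false; // not ours; the base class will raise its AssertionError
            }
            GeoPoint point = (GeoPoint) value;
            // Same indexing as the deleted core-test branch.
            doc.add(new SortedNumericDocValuesField(name, GeoTileUtils.longEncode(point.lon(), point.lat(), 7)));
            doc.add(new LatLonPoint(name, point.lat(), point.lon()));
            return true;
        }
    }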
+    protected static Map<String, Object> createAfterKey(Object... fields) {
+        assert fields.length % 2 == 0;
+        final Map<String, Object> map = new HashMap<>();
+        for (int i = 0; i < fields.length; i += 2) {
+            String field = (String) fields[i];
+            map.put(field, fields[i + 1]);
+        }
+        return map;
+    }
+
+    protected static long asLong(String dateTime) {
+        return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
+    }
+}
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
index a4099d66de28e..5325c48e16913 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
@@ -68,10 +68,6 @@
 import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.filter.ParsedFilter;
 import org.opensearch.search.aggregations.bucket.filter.ParsedFilters;
-import org.opensearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
-import org.opensearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
-import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
-import org.opensearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid;
 import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.global.ParsedGlobal;
 import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
@@ -275,8 +271,6 @@ public ReduceContext forFinalReduction() {
         map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c));
         map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c));
         map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c));
-        map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c));
-        map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c));
         map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
         map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
         map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));
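As an aside, the `addValueToDocument` hook above is the intended extension point for plugins whose composite sources use value types the base class does not know about. A downstream test subclass might override it roughly like this (a hypothetical sketch — the BigInteger handling and encoding are illustrative, not part of this patch; imports and the enclosing test class are elided):

    @Override
    protected boolean addValueToDocument(final Document doc, final String name, final Object value) {
        if (value instanceof BigInteger) {
            // Illustrative encoding only: store a sortable doc-values representation of the value.
            doc.add(new SortedSetDocValuesField(name, new BytesRef(((BigInteger) value).toString())));
            return true;
        }
        return false; // unknown types still fail with the base class's AssertionError
    }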
From 9d6255c61748ffdf0afde11a3724d00d4b1f1d05 Mon Sep 17 00:00:00 2001
From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com>
Date: Tue, 23 Aug 2022 18:12:36 +0530
Subject: [PATCH 102/104] Add changes to Point in time segments API service
 layer (#4105)

* pit segments service layer changes

Signed-off-by: Bharathwaj G

* Addressing comment

Signed-off-by: Bharathwaj G

* Addressing comment

Signed-off-by: Bharathwaj G

* Addressing comment

Signed-off-by: Bharathwaj G

* addressing review comments

Signed-off-by: Bharathwaj G

* addressing comment

Signed-off-by: Bharathwaj G

* Addressing comments

Signed-off-by: Bharathwaj G

* addressing comments

Signed-off-by: Bharathwaj G

* Addressing comments

Signed-off-by: Bharathwaj G

* Adding '_all' as option to get all segments

Signed-off-by: Bharathwaj G

Signed-off-by: Bharathwaj G
---
 .../org/opensearch/action/ActionModule.java   |   3 +
 .../indices/segments/PitSegmentsAction.java   |  24 ++
 .../indices/segments/PitSegmentsRequest.java  |  87 ++++++
 .../segments/TransportPitSegmentsAction.java  | 261 ++++++++++++++++++
 .../action/search/CreatePitResponse.java      |   2 +-
 .../node/TransportBroadcastByNodeAction.java  |   9 +-
 .../java/org/opensearch/client/Client.java    |   7 +
 .../client/support/AbstractClient.java        |   7 +
 .../cluster/routing/ShardRouting.java         |   4 +-
 .../search/internal/PitReaderContext.java     |  24 ++
 .../action/search/PitTestsUtil.java           |  23 +-
 .../search/CreatePitSingleNodeTests.java      |  16 +-
 .../opensearch/search/PitMultiNodeTests.java  |   3 +
 13 files changed, 464 insertions(+), 6 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java
 create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java
 create mode 100644 server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java

diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java
index 052d2ec2b5764..797c5c38fada6 100644
--- a/server/src/main/java/org/opensearch/action/ActionModule.java
+++ b/server/src/main/java/org/opensearch/action/ActionModule.java
@@ -165,7 +165,9 @@
 import org.opensearch.action.admin.indices.rollover.RolloverAction;
 import org.opensearch.action.admin.indices.rollover.TransportRolloverAction;
 import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction;
+import org.opensearch.action.admin.indices.segments.PitSegmentsAction;
 import org.opensearch.action.admin.indices.segments.TransportIndicesSegmentsAction;
+import org.opensearch.action.admin.indices.segments.TransportPitSegmentsAction;
 import org.opensearch.action.admin.indices.settings.get.GetSettingsAction;
 import org.opensearch.action.admin.indices.settings.get.TransportGetSettingsAction;
 import org.opensearch.action.admin.indices.settings.put.TransportUpdateSettingsAction;
@@ -671,6 +673,7 @@ public <Request extends ActionRequest, Response extends ActionResponse> void reg
         actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class);
         actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class);
         actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class);
+        actions.register(PitSegmentsAction.INSTANCE, TransportPitSegmentsAction.class);
 
         // Remote Store
         actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class);
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java
new file mode 100644
index 0000000000000..b52ef32a91b16
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.segments;
+
+import org.opensearch.action.ActionType;
+
+/**
+ * Action for retrieving segment information for PITs
+ */
+public class PitSegmentsAction extends ActionType<IndicesSegmentResponse> {
+
+    public static final PitSegmentsAction INSTANCE = new PitSegmentsAction();
+    public static final String NAME = "indices:monitor/point_in_time/segments";
+
+    private PitSegmentsAction() {
+        super(NAME, IndicesSegmentResponse::new);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java
new file mode 100644
index 0000000000000..84f5e5ad6a1e8
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.segments;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.broadcast.BroadcastRequest;
+import org.opensearch.common.Strings;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.opensearch.action.ValidateActions.addValidationError;
+
+/**
+ * Transport request for retrieving PITs segment information
+ */
+public class PitSegmentsRequest extends BroadcastRequest<PitSegmentsRequest> {
+    private boolean verbose = false;
+    private final List<String> pitIds = new ArrayList<>();
+
+    public PitSegmentsRequest() {
+        this(Strings.EMPTY_ARRAY);
+    }
+
+    public PitSegmentsRequest(StreamInput in) throws IOException {
+        super(in);
+        pitIds.addAll(Arrays.asList(in.readStringArray()));
+        verbose = in.readBoolean();
+    }
+
+    public PitSegmentsRequest(String... pitIds) {
+        super(pitIds);
+        this.pitIds.addAll(Arrays.asList(pitIds));
+    }
+
+    /**
+     * true if detailed information about each segment should be returned,
+     * false otherwise.
+     */
+    public boolean isVerbose() {
+        return verbose;
+    }
+
+    /**
+     * Sets the verbose option.
+     * @see #isVerbose()
+     */
+    public void setVerbose(boolean v) {
+        verbose = v;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeStringArrayNullable((pitIds == null) ? null : pitIds.toArray(new String[pitIds.size()]));
+        out.writeBoolean(verbose);
+    }
+
+    public List<String> getPitIds() {
+        return Collections.unmodifiableList(pitIds);
+    }
+
+    public void clearAndSetPitIds(List<String> pitIds) {
+        this.pitIds.clear();
+        this.pitIds.addAll(pitIds);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (pitIds == null || pitIds.isEmpty()) {
+            validationException = addValidationError("no pit ids specified", validationException);
+        }
+        return validationException;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java
new file mode 100644
index 0000000000000..9d4ece74a7270
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java
@@ -0,0 +1,261 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.action.admin.indices.segments;
+
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.search.ListPitInfo;
+import org.opensearch.action.search.PitService;
+import org.opensearch.action.search.SearchContextId;
+import org.opensearch.action.search.SearchContextIdForNode;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.DefaultShardOperationFailedException;
+import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.routing.AllocationId;
+import org.opensearch.cluster.routing.PlainShardsIterator;
+import org.opensearch.cluster.routing.RecoverySource;
+import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.ShardRoutingState;
+import org.opensearch.cluster.routing.ShardsIterator;
+import org.opensearch.cluster.routing.UnassignedInfo;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.Strings;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.xcontent.XContentBuilder;
+import org.opensearch.index.shard.ShardId;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.search.SearchService;
+import org.opensearch.search.internal.PitReaderContext;
+import org.opensearch.tasks.Task;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.opensearch.action.search.SearchContextId.decode;
+
+/**
+ * Transport action for retrieving segment information of PITs
+ */
+public class TransportPitSegmentsAction extends TransportBroadcastByNodeAction<PitSegmentsRequest, IndicesSegmentResponse, ShardSegments> {
+    private final ClusterService clusterService;
+    private final IndicesService indicesService;
+    private final SearchService searchService;
+    private final NamedWriteableRegistry namedWriteableRegistry;
+    private final TransportService transportService;
+    private final PitService pitService;
+
+    @Inject
+    public TransportPitSegmentsAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        IndicesService indicesService,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        SearchService searchService,
+        NamedWriteableRegistry namedWriteableRegistry,
+        PitService pitService
+    ) {
+        super(
+            PitSegmentsAction.NAME,
+            clusterService,
+            transportService,
+            actionFilters,
+            indexNameExpressionResolver,
+            PitSegmentsRequest::new,
+            ThreadPool.Names.MANAGEMENT
+        );
+        this.clusterService = clusterService;
+        this.indicesService = indicesService;
+        this.searchService = searchService;
+        this.namedWriteableRegistry = namedWriteableRegistry;
+        this.transportService = transportService;
+        this.pitService = pitService;
+    }
+
+    /**
+     * Executes the PIT segments flow, either for all PITs or for the requested PIT IDs
+     */
+    @Override
+    protected void doExecute(Task task, PitSegmentsRequest request, ActionListener<IndicesSegmentResponse> listener) {
+        List<String> pitIds = request.getPitIds();
+        if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) {
+            pitService.getAllPits(ActionListener.wrap(response -> {
+                request.clearAndSetPitIds(response.getPitInfos().stream().map(ListPitInfo::getPitId).collect(Collectors.toList()));
+                super.doExecute(task, request, listener);
+            }, listener::onFailure));
+        } else {
+            super.doExecute(task, request, listener);
+        }
+    }
+
+    /**
+     * Builds the list of shards from which PIT segment details need to be retrieved
+     * @param clusterState the cluster state
+     * @param request the underlying request
+     * @param concreteIndices the concrete indices on which to execute the operation
+     */
+    @Override
+    protected ShardsIterator shards(ClusterState clusterState, PitSegmentsRequest request, String[] concreteIndices) {
+        final ArrayList<ShardRouting> iterators = new ArrayList<>();
+        for (String pitId : request.getPitIds()) {
+            SearchContextId searchContext = decode(namedWriteableRegistry, pitId);
+            for (Map.Entry<ShardId, SearchContextIdForNode> entry : searchContext.shards().entrySet()) {
+                final SearchContextIdForNode perNode = entry.getValue();
+                // check if node is part of local cluster
+                if (Strings.isEmpty(perNode.getClusterAlias())) {
+                    final ShardId shardId = entry.getKey();
+                    iterators.add(
+                        new PitAwareShardRouting(
+                            pitId,
+                            shardId,
+                            perNode.getNode(),
+                            null,
+                            true,
+                            ShardRoutingState.STARTED,
+                            null,
+                            null,
+                            null,
+                            -1L
+                        )
+                    );
+                }
+            }
+        }
+        return new PlainShardsIterator(iterators);
+    }
+
+    @Override
+    protected ClusterBlockException checkGlobalBlock(ClusterState state, PitSegmentsRequest request) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+    }
+
+    @Override
+    protected ClusterBlockException checkRequestBlock(ClusterState state, PitSegmentsRequest countRequest, String[] concreteIndices) {
+        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices);
+    }
+
+    @Override
+    protected ShardSegments readShardResult(StreamInput in) throws IOException {
+        return new ShardSegments(in);
+    }
+
+    @Override
+    protected IndicesSegmentResponse newResponse(
+        PitSegmentsRequest request,
+        int totalShards,
+        int successfulShards,
+        int failedShards,
+        List<ShardSegments> results,
+        List<DefaultShardOperationFailedException> shardFailures,
+        ClusterState clusterState
+    ) {
+        return new IndicesSegmentResponse(
+            results.toArray(new ShardSegments[results.size()]),
+            totalShards,
+            successfulShards,
+            failedShards,
+            shardFailures
+        );
+    }
+
+    @Override
+    protected PitSegmentsRequest readRequestFrom(StreamInput in) throws IOException {
+        return new PitSegmentsRequest(in);
+    }
+
+    @Override
+    public List<ShardRouting> getShardRoutingsFromInputStream(StreamInput in) throws IOException {
+        return in.readList(PitAwareShardRouting::new);
+    }
+
+    /**
+     * Retrieves the segment details of the PIT context
+     * @param request the node-level request
+     * @param shardRouting the shard on which to execute the operation
+     */
+    @Override
+    protected ShardSegments shardOperation(PitSegmentsRequest request, ShardRouting shardRouting) {
+        assert shardRouting instanceof PitAwareShardRouting;
+        PitAwareShardRouting pitAwareShardRouting = (PitAwareShardRouting) shardRouting;
+        SearchContextIdForNode searchContextIdForNode = decode(namedWriteableRegistry, pitAwareShardRouting.getPitId()).shards()
+            .get(shardRouting.shardId());
+        PitReaderContext pitReaderContext = searchService.getPitReaderContext(searchContextIdForNode.getSearchContextId());
+        if (pitReaderContext == null) {
+            return new ShardSegments(shardRouting, Collections.emptyList());
+        }
+        return new ShardSegments(pitReaderContext.getShardRouting(), pitReaderContext.getSegments());
+    }
+
+    /**
+     * Holds the PIT id, which is used to perform the broadcast operation on PIT shards and retrieve segment information
+     */
+    public class PitAwareShardRouting extends ShardRouting {
+
+        private final String pitId;
+
+        public PitAwareShardRouting(StreamInput in) throws IOException {
+            super(in);
+            this.pitId = in.readString();
+        }
+
+        public PitAwareShardRouting(
+            String pitId,
+            ShardId shardId,
+            String currentNodeId,
+            String relocatingNodeId,
+            boolean primary,
+            ShardRoutingState state,
+            RecoverySource recoverySource,
+            UnassignedInfo unassignedInfo,
+            AllocationId allocationId,
+            long expectedShardSize
+        ) {
+            super(
+                shardId,
+                currentNodeId,
+                relocatingNodeId,
+                primary,
+                state,
+                recoverySource,
+                unassignedInfo,
+                allocationId,
+                expectedShardSize
+            );
+            this.pitId = pitId;
+        }
+
+        public String getPitId() {
+            return pitId;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(pitId);
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            super.toXContent(builder, params);
+            builder.field("pit_id", pitId);
+            return builder.endObject();
+        }
+    }
+}
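For orientation, a caller could exercise the new action end to end roughly like this (a hedged sketch, not part of the patch — `client` and `logger` are assumed to exist; passing `"_all"` fans out to every open PIT, as handled in `doExecute` above, while one or more concrete PIT ids restricts the scope):

    PitSegmentsRequest request = new PitSegmentsRequest("_all"); // or: new PitSegmentsRequest(pitId)
    request.setVerbose(true); // include detailed per-segment information
    client.execute(PitSegmentsAction.INSTANCE, request, ActionListener.wrap(
        response -> logger.info("PIT segments for indices {}", response.getIndices().keySet()),
        e -> logger.error("PIT segments request failed", e)
    ));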
diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java
index 25eb9aff9e3d7..dd197a37f8616 100644
--- a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java
+++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java
@@ -28,7 +28,7 @@
  * Create point in time response with point in time id and shard success / failures
  */
 public class CreatePitResponse extends ActionResponse implements StatusToXContentObject {
-    private static final ParseField ID = new ParseField("id");
+    private static final ParseField ID = new ParseField("pit_id");
     private static final ParseField CREATION_TIME = new ParseField("creation_time");
 
     // point in time id
diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index f849be4db4e2b..9e353a35831d0 100644
--- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -532,6 +532,13 @@ private void onShardOperation(
         }
     }
 
+    /**
+     * Reads the list of ShardRouting entries from the input stream
+     */
+    public List<ShardRouting> getShardRoutingsFromInputStream(StreamInput in) throws IOException {
+        return in.readList(ShardRouting::new);
+    }
+
     /**
      * A node request
      *
@@ -547,7 +554,7 @@ public class NodeRequest extends TransportRequest implements IndicesRequest {
         public NodeRequest(StreamInput in) throws IOException {
             super(in);
             indicesLevelRequest = readRequestFrom(in);
-            shards = in.readList(ShardRouting::new);
+            shards = getShardRoutingsFromInputStream(in);
             nodeId = in.readString();
         }
 
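The hook matters because `NodeRequest` deserialization must mirror what the subclass serialized: if a subclass ships a `ShardRouting` subtype carrying extra wire fields, the base `in.readList(ShardRouting::new)` would leave those trailing bytes unread and corrupt the stream. The override pattern then looks like this (a sketch mirroring `TransportPitSegmentsAction` above, not an additional change in this patch):

    @Override
    public List<ShardRouting> getShardRoutingsFromInputStream(StreamInput in) throws IOException {
        // Read the concrete subtype so the trailing pitId written by writeTo(...) is consumed.
        return in.readList(PitAwareShardRouting::new);
    }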
diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java
index 1d3bbfcba43f9..94043d5c3c89f 100644
--- a/server/src/main/java/org/opensearch/client/Client.java
+++ b/server/src/main/java/org/opensearch/client/Client.java
@@ -34,6 +34,8 @@
 
 import org.opensearch.action.ActionFuture;
 import org.opensearch.action.ActionListener;
+import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.opensearch.action.admin.indices.segments.PitSegmentsRequest;
 import org.opensearch.action.bulk.BulkRequest;
 import org.opensearch.action.bulk.BulkRequestBuilder;
 import org.opensearch.action.bulk.BulkResponse;
@@ -339,6 +341,11 @@ public interface Client extends OpenSearchClient, Releasable {
      */
     void deletePits(DeletePitRequest deletePITRequest, ActionListener<DeletePitResponse> listener);
 
+    /**
+     * Get segment information for one or more PITs
+     */
+    void pitSegments(PitSegmentsRequest pitSegmentsRequest, ActionListener<IndicesSegmentResponse> listener);
+
     /**
      * Performs multiple search requests.
      */
diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
index 7084a856ab3d1..bc80a2ba92bf8 100644
--- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java
+++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
@@ -240,6 +242,8 @@
 import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction;
 import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest;
 import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
+import org.opensearch.action.admin.indices.segments.PitSegmentsAction;
+import org.opensearch.action.admin.indices.segments.PitSegmentsRequest;
 import org.opensearch.action.admin.indices.settings.get.GetSettingsAction;
 import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest;
 import org.opensearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
@@ -593,6 +595,11 @@ public void deletePits(final DeletePitRequest deletePITRequest, final ActionList
         execute(DeletePitAction.INSTANCE, deletePITRequest, listener);
     }
 
+    @Override
+    public void pitSegments(final PitSegmentsRequest request, final ActionListener<IndicesSegmentResponse> listener) {
+        execute(PitSegmentsAction.INSTANCE, request, listener);
+    }
+
     @Override
     public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) {
         return execute(MultiSearchAction.INSTANCE, request);
diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java
index 7dec8f9c84a89..e3aa2a666d454 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java
@@ -54,7 +54,7 @@
  *
  * @opensearch.internal
  */
-public final class ShardRouting implements Writeable, ToXContentObject {
+public class ShardRouting implements Writeable, ToXContentObject {
 
     /**
      * Used if shard size is not available
@@ -78,7 +78,7 @@ public final class ShardRouting implements Writeable, ToXContentObject {
      * A constructor to internally create shard routing instances, note, the internal flag should only be set to true
      * by either this class or tests. Visible for testing.
      */
-    ShardRouting(
+    protected ShardRouting(
         ShardId shardId,
         String currentNodeId,
         String relocatingNodeId,
diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java
index 98e84136a8847..b24a8a4172e29 100644
--- a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java
+++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java
@@ -9,12 +9,17 @@
 package org.opensearch.search.internal;
 
 import org.apache.lucene.util.SetOnce;
+import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.common.lease.Releasable;
 import org.opensearch.common.lease.Releasables;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.engine.Engine;
+import org.opensearch.index.engine.Segment;
 import org.opensearch.index.shard.IndexShard;
 
+import java.util.Collections;
+import java.util.List;
+
 /**
  * PIT reader context containing PIT specific information such as pit id, create time etc.
  */
@@ -24,6 +29,15 @@ public class PitReaderContext extends ReaderContext {
     private final SetOnce<String> pitId = new SetOnce<>();
     // Creation time of PIT contexts which helps users to differentiate between multiple PIT reader contexts
     private final SetOnce<Long> creationTime = new SetOnce<>();
+    /**
+     * Shard routing at the time of creation of the PIT reader context
+     */
+    private final ShardRouting shardRouting;
+
+    /**
+     * Encapsulates the segments constituting the shard at the time of creation of the PIT reader context.
+     */
+    private final List<Segment> segments;
 
     public PitReaderContext(
         ShardSearchContextId id,
@@ -34,6 +48,8 @@ public PitReaderContext(
         boolean singleSession
     ) {
         super(id, indexService, indexShard, searcherSupplier, keepAliveInMillis, singleSession);
+        shardRouting = indexShard.routingEntry();
+        segments = indexShard.segments(true);
     }
 
     public String getPitId() {
@@ -67,4 +83,12 @@ public long getCreationTime() {
     public void setCreationTime(final long creationTime) {
         this.creationTime.set(creationTime);
    }
+
+    public ShardRouting getShardRouting() {
+        return shardRouting;
+    }
+
+    public List<Segment> getSegments() {
+        return Collections.unmodifiableList(segments);
+    }
 }
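Because both fields are captured in the constructor, the segments API reports the reader's view at PIT creation rather than the shard's live state. A small sketch of the observable behavior (hypothetical names — `searchService`, `contextId`, `client`, and an open PIT on "index" are all assumed):

    PitReaderContext ctx = searchService.getPitReaderContext(contextId);
    List<Segment> atCreation = ctx.getSegments();
    // Merges after PIT creation do not change what the PIT reports.
    client.admin().indices().prepareForceMerge("index").setMaxNumSegments(1).get();
    assert atCreation.equals(ctx.getSegments());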
diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java
index 433cd9dfa3e89..60a31c62dc32d 100644
--- a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java
+++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java
@@ -14,6 +14,9 @@
 import org.opensearch.action.ActionFuture;
 import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
 import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
+import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.opensearch.action.admin.indices.segments.PitSegmentsAction;
+import org.opensearch.action.admin.indices.segments.PitSegmentsRequest;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.util.concurrent.AtomicArray;
@@ -33,6 +36,8 @@
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.opensearch.test.OpenSearchTestCase.between;
 import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength;
 import static org.opensearch.test.OpenSearchTestCase.randomBoolean;
@@ -107,7 +112,7 @@ public static void assertUsingGetAllPits(Client client, String id, long creation
         GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr);
         ActionFuture<GetAllPitNodesResponse> execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest);
         GetAllPitNodesResponse getPitResponse = execute1.get();
-        Assert.assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id));
+        assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id));
         Assert.assertEquals(getPitResponse.getPitInfos().get(0).getCreationTime(), creationTime);
     }
 
@@ -128,4 +133,20 @@ public static void assertGetAllPitsEmpty(Client client) throws ExecutionExceptio
         GetAllPitNodesResponse getPitResponse = execute1.get();
         Assert.assertEquals(0, getPitResponse.getPitInfos().size());
     }
+
+    public static void assertSegments(boolean isEmpty, String index, long expectedShardSize, Client client) {
+        PitSegmentsRequest pitSegmentsRequest = new PitSegmentsRequest("_all");
+        IndicesSegmentResponse indicesSegmentResponse = client.execute(PitSegmentsAction.INSTANCE, pitSegmentsRequest).actionGet();
+        assertTrue(indicesSegmentResponse.getShardFailures() == null || indicesSegmentResponse.getShardFailures().length == 0);
+        assertEquals(indicesSegmentResponse.getIndices().isEmpty(), isEmpty);
+        if (!isEmpty) {
+            assertTrue(indicesSegmentResponse.getIndices().get(index) != null);
+            assertTrue(indicesSegmentResponse.getIndices().get(index).getIndex().equalsIgnoreCase(index));
+            assertEquals(expectedShardSize, indicesSegmentResponse.getIndices().get(index).getShards().size());
+        }
+    }
+
+    public static void assertSegments(boolean isEmpty, Client client) {
+        assertSegments(isEmpty, "index", 2, client);
+    }
 }
diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java
index b730dc01c4871..a10f004b2ee97 100644
--- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java
+++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java
@@ -33,6 +33,7 @@
 import java.util.concurrent.ExecutionException;
 
 import static org.hamcrest.CoreMatchers.equalTo;
+import static org.opensearch.action.search.PitTestsUtil.assertSegments;
 import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
@@ -68,6 +69,7 @@ public void testCreatePITSuccess() throws ExecutionException, InterruptedExcepti
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
         PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+        assertSegments(false, client());
         client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
         SearchResponse searchResponse = client().prepareSearch("index")
             .setSize(2)
@@ -80,6 +82,7 @@
         validatePitStats("index", 1, 0, 0);
         validatePitStats("index", 1, 0, 1);
         service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test
+        assertSegments(true, client());
         validatePitStats("index", 0, 1, 0);
         validatePitStats("index", 0, 1, 1);
     }
@@ -96,12 +99,14 @@ public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException,
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse response = execute.get();
         PitTestsUtil.assertUsingGetAllPits(client(), response.getId(), response.getCreationTime());
+        assertSegments(false, client());
         assertEquals(4, response.getSuccessfulShards());
         assertEquals(4, service.getActiveContexts());
 
         validatePitStats("index", 1, 0, 0);
         validatePitStats("index1", 1, 0, 0);
         service.doClose();
+        assertSegments(true, client());
         validatePitStats("index", 0, 1, 0);
         validatePitStats("index1", 0, 1, 0);
     }
@@ -115,6 +120,7 @@ public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, I
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
         PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+        assertSegments(false, client());
         client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
         SearchResponse searchResponse = client().prepareSearch("index")
             .setSize(2)
@@ -127,6 +133,7 @@ public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, I
         validatePitStats("index", 1, 0, 0);
         validatePitStats("index", 1, 0, 1);
         service.doClose();
+        assertSegments(true, client());
         validatePitStats("index", 0, 1, 0);
         validatePitStats("index", 0, 1, 1);
     }
@@ -144,6 +151,7 @@ public void testCreatePITWithNonExistentIndex() {
         assertTrue(ex.getMessage().contains("no such index [index1]"));
 
         assertEquals(0, service.getActiveContexts());
+        assertSegments(true, client());
         service.doClose();
     }
 
@@ -164,6 +172,7 @@ public void testCreatePITOnCloseIndex() throws ExecutionException, InterruptedEx
         SearchService service = getInstanceFromNode(SearchService.class);
         assertEquals(0, service.getActiveContexts());
         PitTestsUtil.assertGetAllPitsEmpty(client());
+        assertSegments(true, client());
         service.doClose();
     }
 
@@ -187,6 +196,7 @@ public void testPitSearchOnDeletedIndex() throws ExecutionException, Interrupted
         SearchService service = getInstanceFromNode(SearchService.class);
         PitTestsUtil.assertGetAllPitsEmpty(client());
         assertEquals(0, service.getActiveContexts());
+        assertSegments(true, client());
         service.doClose();
     }
 
@@ -212,6 +222,7 @@ public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedEx
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
         PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+        assertSegments(false, client());
         SearchService service = getInstanceFromNode(SearchService.class);
         assertEquals(2, service.getActiveContexts());
         validatePitStats("index", 1, 0, 0);
@@ -227,7 +238,7 @@
         assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException"));
         assertEquals(0, service.getActiveContexts());
         PitTestsUtil.assertGetAllPitsEmpty(client());
-
+        assertSegments(true, client());
         // PIT reader contexts are lost after close, verifying it with open index api
         client().admin().indices().prepareOpen("index").get();
         ex = expectThrows(SearchPhaseExecutionException.class, () -> {
@@ -491,6 +502,7 @@ public void testPitAfterUpdateIndex() throws Exception {
             assertEquals(0, service.getActiveContexts());
             validatePitStats("test", 0, 1, 0);
             PitTestsUtil.assertGetAllPitsEmpty(client());
+            assertSegments(true, client());
         }
     }
 
@@ -503,6 +515,7 @@ public void testConcurrentSearches() throws Exception {
         ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
         CreatePitResponse pitResponse = execute.get();
         PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+        assertSegments(false, client());
         Thread[] threads = new Thread[5];
         CountDownLatch latch = new CountDownLatch(threads.length);
 
@@ -538,6 +551,7 @@
         validatePitStats("index", 0, 1, 0);
         validatePitStats("index", 0, 1, 1);
         PitTestsUtil.assertGetAllPitsEmpty(client());
+        assertSegments(true, client());
     }
 
     public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException,
diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java
index 29126d786770e..d29ccf5b97138 100644
--- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java
+++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java
@@ -51,6 +51,7 @@
 import java.util.stream.Collectors;
 
 import static org.hamcrest.Matchers.containsString;
+import static org.opensearch.action.search.PitTestsUtil.assertSegments;
 import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 
@@ -85,6 +86,7 @@ public void testPit() throws Exception {
         assertEquals(2, searchResponse.getTotalShards());
         validatePitStats("index", 2, 2);
         PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+        assertSegments(false, client());
     }
 
     public void testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception {
@@ -112,6 +114,7 @@ public Settings onNodeStopped(String nodeName) throws Exception {
                 ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
                 CreatePitResponse pitResponse = execute.get();
                 PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+                assertSegments(false, "index", 1, client());
                 assertEquals(1, pitResponse.getSuccessfulShards());
                 assertEquals(2, pitResponse.getTotalShards());
                 SearchResponse searchResponse = client().prepareSearch("index")

From 53277674107e90057b99afa316f0ed4166f5a3c7 Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Tue, 23 Aug 2022 10:18:40 -0700
Subject: [PATCH 103/104] Add workflow for changelog verification (#4085)

* Add workflow for changelog verification

Signed-off-by: Kunal Kotwani

* Update format for changelog, add developer documentation

Signed-off-by: Kunal Kotwani

* Update link reference to be relative to project

Signed-off-by: Kunal Kotwani

* Fix links for CHANGELOG versions

Signed-off-by: Kunal Kotwani

* Update contribution guide

Signed-off-by: Kunal Kotwani

Signed-off-by: Kunal Kotwani
---
 .github/pull_request_template.md         |  7 ++++---
 .github/workflows/changelog_verifier.yml | 22 ++++++++++++++++++++++
 CHANGELOG.md                             | 19 +++++++++++++++++++
 CONTRIBUTING.md                          | 20 ++++++++++++++++++++
 4 files changed, 65 insertions(+), 3 deletions(-)
 create mode 100644 .github/workflows/changelog_verifier.yml
 create mode 100644 CHANGELOG.md

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index d7981b5113972..c76e27d6dfc7d 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,15 +1,16 @@
 ### Description
 [Describe what this change achieves]
-
+
 ### Issues Resolved
 [List any issues this PR will resolve]
-
+
 ### Check List
 - [ ] New functionality includes testing.
   - [ ] All tests pass
 - [ ] New functionality has been documented.
   - [ ] New functionality has javadoc added
-- [ ] Commits are signed per the DCO using --signoff
+- [ ] Commits are signed per the DCO using --signoff
+- [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../CONTRIBUTING.md#changelog))
 
 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
 For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin).
diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml
new file mode 100644
index 0000000000000..505b02426f22c
--- /dev/null
+++ b/.github/workflows/changelog_verifier.yml
@@ -0,0 +1,22 @@
+name: "Changelog Verifier"
+on:
+  pull_request:
+    types: [opened, edited, review_requested, synchronize, reopened, ready_for_review, labeled, unlabeled]
+
+jobs:
+  # Enforces the update of a changelog file on every pull request
+  verify-changelog:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          ref: ${{ github.event.pull_request.head.ref }}
+
+      - uses: dangoslen/dependabot-changelog-helper@v1
+
+      - uses: stefanzweifel/git-auto-commit-action@v4
+        with:
+          commit_message: "Update changelog"
+
+      - uses: dangoslen/changelog-enforcer@v3
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000000..2a02bfdaf0320
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,19 @@
+# CHANGELOG
+Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
+
+## [Unreleased]
+### Added
+- Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085))
+
+### Changed
+
+### Deprecated
+
+### Removed
+
+### Fixed
+
+### Security
+
+
+[Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 467c7716cc578..16821b1915032 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,6 +6,7 @@
   - [Documentation Changes](#documentation-changes)
   - [Contributing Code](#contributing-code)
   - [Developer Certificate of Origin](#developer-certificate-of-origin)
+  - [Changelog](#changelog)
   - [Review Process](#review-process)
 
 # Contributing to OpenSearch
@@ -116,6 +117,25 @@ Signed-off-by: Jane Smith
 ```
 You may type this line on your own when writing your commit messages. However, if your user.name and user.email are set in your git configs, you can use `-s` or `--signoff` to add the `Signed-off-by` line to the end of the commit message.
 
+## Changelog
+
+OpenSearch maintains a version-specific changelog by enforcing a change to the ongoing [CHANGELOG](CHANGELOG.md) file, adhering to the [Keep A Changelog](https://keepachangelog.com/en/1.0.0/) format.
+
+Briefly, the changes are curated by version, with changes to the main branch added chronologically under the `Unreleased` version. Further, each version has corresponding sections that list the category of the change - `Added`, `Changed`, `Deprecated`, `Removed`, `Fixed`, `Security`.
+
+
+### How to add my changes to [CHANGELOG](CHANGELOG.md)?
+
+As a contributor, you must ensure that every pull request has its changes listed out within the corresponding version and appropriate section of the [CHANGELOG](CHANGELOG.md) file.
+
+Adding the change is a two-step process -
+1. Add your changes to the corresponding section within the CHANGELOG file with dummy pull request information, then publish the PR
+
+    `Your change here ([#PR_NUMBER](PR_URL))`
+
+2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there.
+
 ## Review Process
 
 We deeply appreciate everyone who takes the time to make a contribution. We will review all contributions as quickly as possible. As a reminder, [opening an issue](https://github.com/opensearch-project/OpenSearch/issues/new/choose) discussing your change before you make it is the best way to smooth the PR process. This will prevent a rejection because someone else is already working on the problem, or because the solution is incompatible with the architectural direction.
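As a concrete illustration of step 2, a finished entry for a merged change might look like this (an illustrative example drawn from PATCH 102 in this series; the enforcer step in the workflow above only verifies that CHANGELOG.md was touched, not the entry's wording):

    ### Added
    - Point in time segments API service layer changes ([#4105](https://github.com/opensearch-project/OpenSearch/pull/4105))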
From 06bef8e5a386c18d783d71b6583d9037fa9db660 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Tue, 23 Aug 2022 15:50:26 -0700
Subject: [PATCH 104/104] [Segment Replication] Add update doc integ test for
 Segment replication (#4234)

Signed-off-by: Suraj Singh

Signed-off-by: Suraj Singh
---
 .../replication/SegmentReplicationIT.java     | 60 ++++++++++++++++++-
 1 file changed, 59 insertions(+), 1 deletion(-)

diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index 8566cc5556861..a9b6787d87bdf 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -16,6 +16,8 @@
 import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest;
 import org.opensearch.action.admin.indices.segments.ShardSegments;
 import org.opensearch.action.support.WriteRequest;
+import org.opensearch.action.update.UpdateResponse;
+import org.opensearch.client.Requests;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
@@ -46,8 +48,10 @@
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.index.query.QueryBuilders.matchQuery;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class SegmentReplicationIT extends OpenSearchIntegTestCase {
@@ -419,6 +423,60 @@ public void testDeleteOperations() throws Exception {
         }
     }
 
+    public void testUpdateOperations() throws Exception {
+        final String primary = internalCluster().startNode();
+        createIndex(INDEX_NAME);
+        ensureYellow(INDEX_NAME);
+        final String replica = internalCluster().startNode();
+
+        final int initialDocCount = scaledRandomIntBetween(0, 200);
+        try (
+            BackgroundIndexer indexer = new BackgroundIndexer(
+                INDEX_NAME,
+                "_doc",
+                client(),
+                -1,
+                RandomizedTest.scaledRandomIntBetween(2, 5),
+                false,
+                random()
+            )
+        ) {
+            indexer.start(initialDocCount);
+            waitForDocs(initialDocCount, indexer);
+            refresh(INDEX_NAME);
+            waitForReplicaUpdate();
+
+            // wait a short amount of time to give replication a chance to complete.
+            assertHitCount(client(primary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount);
+            assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), initialDocCount);
+
+            final int additionalDocCount = scaledRandomIntBetween(0, 200);
+            final int expectedHitCount = initialDocCount + additionalDocCount;
+            indexer.start(additionalDocCount);
+            waitForDocs(expectedHitCount, indexer);
+            waitForReplicaUpdate();
+
+            assertHitCount(client(primary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount);
+            assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), expectedHitCount);
+
+            Set<String> ids = indexer.getIds();
+            String id = ids.toArray()[0].toString();
+            UpdateResponse updateResponse = client(primary).prepareUpdate(INDEX_NAME, id)
+                .setDoc(Requests.INDEX_CONTENT_TYPE, "foo", "baz")
+                .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL)
+                .get();
+            assertFalse("request shouldn't have forced a refresh", updateResponse.forcedRefresh());
+            assertEquals(2, updateResponse.getVersion());
+
+            refresh(INDEX_NAME);
+            waitForReplicaUpdate();
+
+            assertSearchHits(client(primary).prepareSearch(INDEX_NAME).setQuery(matchQuery("foo", "baz")).get(), id);
+            assertSearchHits(client(replica).prepareSearch(INDEX_NAME).setQuery(matchQuery("foo", "baz")).get(), id);
+
+        }
+    }
+
     private void assertSegmentStats(int numberOfReplicas) throws IOException {
         final IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().segments(new IndicesSegmentsRequest()).actionGet();