Commit

Run tests with remote store always
Signed-off-by: Sachin Kale <[email protected]>
Sachin Kale committed Oct 18, 2023
1 parent 71e3022 commit f0b6311
Showing 103 changed files with 752 additions and 270 deletions.
@@ -32,6 +32,7 @@

package org.opensearch.action;

import org.apache.lucene.tests.util.LuceneTestCase;
import org.opensearch.action.admin.indices.alias.Alias;
import org.opensearch.action.admin.indices.analyze.AnalyzeAction;
import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
@@ -133,6 +134,7 @@
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasItem;

@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/skip_as_test_checks_replication_transport_calls")
@ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2)
public class IndicesRequestIT extends OpenSearchIntegTestCase {

@@ -43,6 +43,7 @@
import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.opensearch.action.admin.indices.refresh.RefreshAction;
import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.opensearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.opensearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.opensearch.action.bulk.BulkAction;
@@ -54,6 +55,7 @@
import org.opensearch.action.support.WriteRequest;
import org.opensearch.action.support.replication.ReplicationResponse;
import org.opensearch.action.support.replication.TransportReplicationActionTests;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.action.ActionFuture;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.regex.Regex;
@@ -77,6 +79,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
@@ -249,7 +252,15 @@ public void testTransportBroadcastReplicationTasks() {
}

// we will have as many [s][p] and [s][r] tasks as we have primary and replica shards
assertEquals(numberOfShards.totalNumShards, numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices("test");
String remoteStoreEnabledStr = client().admin().indices().getSettings(getSettingsRequest).actionGet().getSetting("test", IndexMetadata.SETTING_REMOTE_STORE_ENABLED);
logger.warn("IndexSettings ({})", remoteStoreEnabledStr);
if (Objects.equals(remoteStoreEnabledStr, "true")) {
// with remote store enabled, refresh runs only on the primaries, so only [s][p] tasks are expected
assertEquals(numberOfShards.numPrimaries, numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
} else {
assertEquals(numberOfShards.totalNumShards, numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1));
}

// the [s][p] and [s][r] tasks should have a corresponding [s] task on the same node as a parent
List<TaskInfo> spEvents = findEvents(RefreshAction.NAME + "[s][*]", Tuple::v1);
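The settings lookup above is repeated verbatim in several tests touched by this commit; a small helper along the following lines (a sketch only; the name and placement are assumptions, not part of this change) would express the same check once:

// Hypothetical helper (sketch, not in this commit): reads the index setting used above
// and reports whether the index was created with remote store enabled.
private boolean isRemoteStoreEnabled(String index) {
    GetSettingsRequest request = new GetSettingsRequest().indices(index);
    String enabled = client().admin()
        .indices()
        .getSettings(request)
        .actionGet()
        .getSetting(index, IndexMetadata.SETTING_REMOTE_STORE_ENABLED);
    return Objects.equals(enabled, "true");
}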
@@ -329,7 +340,14 @@ public void testTransportBulkTasks() {

// we should get as many [s][r] operations as we have replica shards
// they all should have the same shard task as a parent
assertEquals(getNumShards("test").numReplicas, numberOfEvents(BulkAction.NAME + "[s][r]", Tuple::v1));
GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices("test");
String remoteStoreEnabledStr = client().admin().indices().getSettings(getSettingsRequest).actionGet().getSetting("test", IndexMetadata.SETTING_REMOTE_STORE_ENABLED);
logger.warn("IndexSettings ({})", remoteStoreEnabledStr);
if (Objects.equals(remoteStoreEnabledStr, "true")) {
// with remote store enabled, no [s][r] replica tasks are created for bulk requests
assertEquals(0, numberOfEvents(BulkAction.NAME + "[s][r]", Tuple::v1));
} else {
assertEquals(getNumShards("test").numReplicas, numberOfEvents(BulkAction.NAME + "[s][r]", Tuple::v1));
}
assertParentTask(findEvents(BulkAction.NAME + "[s][r]", Tuple::v1), shardTask);
}

@@ -808,7 +826,7 @@ public void testTaskStoringSuccessfulResult() throws Exception {
Map<?, ?> result = taskResult.getResponseAsMap();
assertEquals("0", result.get("failure_count").toString());

assertNoFailures(client().admin().indices().prepareRefresh(TaskResultsService.TASK_INDEX).get());
refresh(TaskResultsService.TASK_INDEX);
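// refresh(...) is the OpenSearchIntegTestCase helper; judging from the line it replaces, it issues the same refresh request and asserts that no shard failures occurred.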

SearchResponse searchResponse = client().prepareSearch(TaskResultsService.TASK_INDEX)
.setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.getAction())))
@@ -36,6 +36,7 @@
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.tests.util.LuceneTestCase;
import org.apache.lucene.util.Constants;
import org.opensearch.Version;
import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse;
@@ -89,6 +90,7 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

@LuceneTestCase.AwaitsFix(bugUrl = "Fails with Segment Replication")
public class ShrinkIndexIT extends OpenSearchIntegTestCase {

@Override
@@ -59,6 +59,7 @@ protected boolean useSegmentReplication() {
return false;
}

@AwaitsFix(bugUrl = "https://quip-amazon.com/WmWbAWnrpxuq/Tests-Exclude-with-SegRep#temp:C:YSJ8bd40ae5586c46d494366fc3a")
public void testForceMergeUUIDConsistent() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
final String index = "test-index";
@@ -31,6 +31,7 @@

package org.opensearch.action.bulk;

import org.apache.lucene.tests.util.LuceneTestCase;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.support.WriteRequest;
import org.opensearch.common.action.ActionFuture;
@@ -48,6 +49,7 @@
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2)
@LuceneTestCase.AwaitsFix(bugUrl = "Muting as the test asserts on local and global checkpoint")
public class BulkRejectionIT extends OpenSearchIntegTestCase {

@Override
@@ -79,6 +79,7 @@
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
public class BulkWithUpdatesIT extends OpenSearchIntegTestCase {

@Override
@@ -661,6 +662,7 @@ public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception {
.setRefreshPolicy(RefreshPolicy.IMMEDIATE);

client().bulk(bulkRequest).get();
refresh();
SearchResponse searchResponse = client().prepareSearch("bulkindex*").get();
assertHitCount(searchResponse, 3);

@@ -144,7 +144,7 @@ public void testLocalClusterAlias() {
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
IndexResponse indexResponse = client().index(indexRequest).actionGet();
assertEquals(RestStatus.CREATED, indexResponse.status());

refresh();
{
SearchRequest searchRequest = SearchRequest.subSearchRequest(
new SearchRequest(),
@@ -198,6 +198,7 @@ public void testAbsoluteStartMillis() {
IndexResponse indexResponse = client().index(indexRequest).actionGet();
assertEquals(RestStatus.CREATED, indexResponse.status());
}
refresh();
{
SearchRequest searchRequest = new SearchRequest();
SearchResponse searchResponse = client().search(searchRequest).actionGet();
@@ -251,7 +252,8 @@ public void testFinalReduce() {
IndexResponse indexResponse = client().index(indexRequest).actionGet();
assertEquals(RestStatus.CREATED, indexResponse.status());
}
client().admin().indices().prepareRefresh("test").get();
refresh("test");

SearchRequest originalRequest = new SearchRequest();
SearchSourceBuilder source = new SearchSourceBuilder();
@@ -498,6 +500,7 @@ public void testCircuitBreakerFetchFail() throws Exception {
int numShards = randomIntBetween(1, 10);
int numDocs = numShards * 10;
indexSomeDocs("boom", numShards, numDocs);
refresh("boom");

final AtomicArray<Exception> exceptions = new AtomicArray<>(10);
final CountDownLatch latch = new CountDownLatch(10);
@@ -536,7 +539,7 @@ private void indexSomeDocs(String indexName, int numberOfShards, int numberOfDoc
IndexResponse indexResponse = client().prepareIndex(indexName).setSource("number", randomInt()).get();
assertEquals(RestStatus.CREATED, indexResponse.status());
}
client().admin().indices().prepareRefresh(indexName).get();
refresh(indexName);
}

private long requestBreakerUsed() {
@@ -34,6 +34,7 @@

import org.opensearch.action.ActionRequest;
import org.opensearch.action.ActionType;
import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.opensearch.action.support.ActionFilters;
import org.opensearch.cluster.action.shard.ShardStateAction;
import org.opensearch.cluster.metadata.IndexMetadata;
@@ -69,6 +70,7 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
@@ -219,7 +221,14 @@ public void testRetryOnStoppedTransportService() throws Exception {

TestPlugin primaryTestPlugin = getTestPlugin(primary);
// this test only provoked an issue for the primary action, but for completeness, we pick the action randomly
primaryTestPlugin.testActionName = TestAction.ACTION_NAME + (randomBoolean() ? "[p]" : "[r]");
GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices("test");
String remoteStoreEnabledStr = client().admin().indices().getSettings(getSettingsRequest).actionGet().getSetting("test", IndexMetadata.SETTING_REMOTE_STORE_ENABLED);
logger.warn("IndexSettings ({})", remoteStoreEnabledStr);
if (Objects.equals(remoteStoreEnabledStr, "true")) {
// with remote store enabled there is no replica action to exercise, so always test the primary action
primaryTestPlugin.testActionName = TestAction.ACTION_NAME + "[p]";
} else {
primaryTestPlugin.testActionName = TestAction.ACTION_NAME + (randomBoolean() ? "[p]" : "[r]");
}
logger.info("--> Test action {}, primary {}, replica {}", primaryTestPlugin.testActionName, primary, replica);

AtomicReference<Object> response = new AtomicReference<>();
@@ -51,6 +51,7 @@
import org.opensearch.index.engine.VersionConflictEngineException;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.MockKeywordPlugin;
import org.opensearch.test.OpenSearchIntegTestCase;

import java.io.IOException;
import java.util.ArrayList;
@@ -72,6 +73,7 @@
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
public class GetTermVectorsIT extends AbstractTermVectorsTestCase {

public GetTermVectorsIT(Settings dynamicSettings) {
@@ -56,6 +56,7 @@
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
public class ClusterHealthIT extends OpenSearchIntegTestCase {

public void testSimpleLocalHealth() {
@@ -263,6 +263,7 @@ void checkWriteAction(ActionRequestBuilder<?, ?> builder) {
}
}

@AwaitsFix(bugUrl = "https://quip-amazon.com/WmWbAWnrpxuq/Tests-Exclude-with-SegRep#temp:C:YSJ2bed09f5cc2b490c80f76e023")
public void testNoClusterManagerActionsWriteClusterManagerBlock() throws Exception {
Settings settings = Settings.builder()
.put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false)
@@ -75,7 +75,7 @@
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0)
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class AllocationIdIT extends OpenSearchIntegTestCase {

@Override
@@ -170,10 +170,15 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale
node2 = internalCluster().startNode(node2DataPathSettings);
ensureGreen(indexName);

assertThat(historyUUID(node1, indexName), not(equalTo(historyUUID)));
// the historyUUID remains the same because the shard data is restored from the remote store
assertThat(historyUUID(node1, indexName), equalTo(historyUUID));
assertThat(historyUUID(node1, indexName), equalTo(historyUUID(node2, indexName)));

internalCluster().assertSameDocIdsOnShards();

// Applicable only with remote store: the full data set is restored, so both nodes serve all docs
assertHitCount(client(node1).prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocs + numExtraDocs);
assertHitCount(client(node2).prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocs + numExtraDocs);
}

public void checkHealthStatus(String indexName, ClusterHealthStatus healthStatus) {
@@ -189,7 +194,7 @@ private int indexDocs(String indexName, Object... source) throws InterruptedExce
// index some docs in several segments
int numDocs = 0;
for (int k = 0, attempts = randomIntBetween(5, 10); k < attempts; k++) {
final int numExtraDocs = between(10, 100);
final int numExtraDocs = between(10, 20);
IndexRequestBuilder[] builders = new IndexRequestBuilder[numExtraDocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = client().prepareIndex(indexName).setSource(source);
@@ -114,6 +114,7 @@ protected boolean useSegmentReplication() {
return false;
}

@AwaitsFix(bugUrl = "Test asserts on local and global checkpoint")
public void testBulkWeirdScenario() throws Exception {
String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodes(2);
@@ -298,6 +299,7 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce
);
}

@AwaitsFix(bugUrl = "Test asserts on local and global checkpoint")
public void testForceStaleReplicaToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 cluster-manager, 2 data");
String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
@@ -380,7 +382,8 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception {
.get()
.getShards();
for (ShardStats shardStat : shardStats) {
assertThat(shardStat.getCommitStats().getNumDocs(), equalTo(useStaleReplica ? 1 : 0));
// with remote store, the stale replica is also updated, so every copy reports 2 docs
assertThat(shardStat.getCommitStats().getNumDocs(), equalTo(2));
}
// allocation id of old primary was cleaned from the in-sync set
final ClusterState state = client().admin().cluster().prepareState().get().getState();
@@ -664,6 +667,7 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception {
/**
* This test asserts that replicas that fail to execute resync operations will be failed but not marked as stale.
*/
@AwaitsFix(bugUrl = "https://quip-amazon.com/WmWbAWnrpxuq/Tests-Exclude-with-SegRep#temp:C:YSJbc6c73504d8842cd82aa8f680")
public void testPrimaryReplicaResyncFailed() throws Exception {
String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY);
final int numberOfReplicas = between(2, 3);
@@ -135,8 +135,8 @@ public void removeFilesystemProvider() {
defaultFileSystem = null;
}

private static final long WATERMARK_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
private static final long TOTAL_SPACE_BYTES = new ByteSizeValue(100, ByteSizeUnit.KB).getBytes();
private static final long WATERMARK_BYTES = new ByteSizeValue(1, ByteSizeUnit.KB).getBytes();
private static final long TOTAL_SPACE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
private static final String INDEX_ROUTING_ALLOCATION_NODE_SETTING = "index.routing.allocation.include._name";

@Override
@@ -537,7 +537,7 @@ private Set<ShardRouting> getShardRoutings(final String nodeId, final String ind
*/
private long createReasonableSizedShards(final String indexName) throws InterruptedException {
while (true) {
final IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[scaledRandomIntBetween(100, 10000)];
final IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[scaledRandomIntBetween(100, 100)];
for (int i = 0; i < indexRequestBuilders.length; i++) {
indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", randomAlphaOfLength(10));
}
@@ -611,6 +611,7 @@ public void testOpenIndexOverLimit() {
public void testIgnoreDotSettingOnMultipleNodes() throws IOException, InterruptedException {
int maxAllowedShardsPerNode = 10, indexPrimaryShards = 11, indexReplicaShards = 1;

this.nodeAttributeSettings = null;
InternalTestCluster cluster = new InternalTestCluster(
randomLong(),
createTempDir(),
@@ -647,6 +648,8 @@ public Path nodeConfigPath(int nodeOrdinal) {
);
cluster.beforeTest(random());

OpenSearchIntegTestCase.remoteStoreNodeAttributeCluster = cluster;
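// Assumption (not stated in the diff): this static hand-off lets the locally constructed InternalTestCluster pick up the remote-store node attributes configured by the base test class; it is cleared again once the nodes below have started.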

// Starting 3 ClusterManagerOnlyNode nodes
cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", true).build());
cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build());
@@ -656,6 +659,8 @@ public Path nodeConfigPath(int nodeOrdinal) {
cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build());
cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build());

OpenSearchIntegTestCase.remoteStoreNodeAttributeCluster = null;

// Setting max shards per node to be 10
cluster.client()
.admin()