Skip to content

Commit

Permalink
Merge branch 'main' of github.com:opensearch-project/OpenSearch into fix-flaky-test
Browse files Browse the repository at this point in the history
  • Loading branch information
bharath-techie committed Oct 27, 2023
2 parents 3731fe9 + e9affea commit f22f628
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 40 deletions.
10 changes: 10 additions & 0 deletions server/src/main/java/org/opensearch/index/shard/IndexShard.java
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,7 @@
import org.opensearch.indices.recovery.RecoveryTarget;
import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
import org.opensearch.indices.replication.common.ReplicationTimer;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.repositories.Repository;
import org.opensearch.search.suggest.completion.CompletionStats;
Expand Down Expand Up @@ -698,7 +699,16 @@ public void updateShardState(
if (indexSettings.isSegRepEnabled()) {
// this Shard's engine was read only, we need to update its engine before restoring local history from xlog.
assert newRouting.primary() && currentRouting.primary() == false;
ReplicationTimer timer = new ReplicationTimer();
timer.start();
logger.debug(
"Resetting engine on promotion of shard [{}] to primary, startTime {}\n",
shardId,
timer.startTime()
);
resetEngineToGlobalCheckpoint();
timer.stop();
logger.info("Completed engine failover for shard [{}] in: {} ms", shardId, timer.time());
// It is possible an engine can open with a SegmentInfos on a higher gen but the reader does not refresh to
// trigger our refresh listener.
// Force update the checkpoint post engine reset.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.filter.RegexFilter;
import org.apache.lucene.codecs.LiveDocsFormat;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.KeywordField;
import org.apache.lucene.document.LongPoint;
Expand Down Expand Up @@ -3237,22 +3236,10 @@ public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpEnabled()
MockDirectoryWrapper wrapper = newMockDirectory();
final CountDownLatch cleanupCompleted = new CountDownLatch(1);
MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() {
public boolean didFail1;
public boolean didFail2;

@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
if (!doFail) {
return;
}

// Fail segment merge with diskfull during merging terms.
if (callStackContainsAnyOf("mergeTerms") && !didFail1) {
didFail1 = true;
throw new IOException("No space left on device");
}
if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) {
didFail2 = true;
// Fail segment merge with diskfull during merging terms
if (callStackContainsAnyOf("mergeTerms")) {
throw new IOException("No space left on device");
}
}
Expand Down Expand Up @@ -3325,7 +3312,6 @@ public void onFailedEngine(String reason, Exception e) {
segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));

fail.setDoFail();
// IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not.
expectThrowsAnyOf(
Arrays.asList(IOException.class, IllegalStateException.class),
Expand All @@ -3345,20 +3331,10 @@ public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpDisabled(
MockDirectoryWrapper wrapper = newMockDirectory();
final CountDownLatch cleanupCompleted = new CountDownLatch(1);
MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() {
public boolean didFail1;
public boolean didFail2;

@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
if (!doFail) {
return;
}
if (callStackContainsAnyOf("mergeTerms") && !didFail1) {
didFail1 = true;
throw new IOException("No space left on device");
}
if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) {
didFail2 = true;
if (callStackContainsAnyOf("mergeTerms")) {
throw new IOException("No space left on device");
}
}
Expand Down Expand Up @@ -3439,7 +3415,6 @@ public void onFailedEngine(String reason, Exception e) {
segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));

fail.setDoFail();
// IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not.
expectThrowsAnyOf(
Arrays.asList(IOException.class, IllegalStateException.class),
Expand All @@ -3459,20 +3434,10 @@ public void testUnreferencedFileCleanUpFailsOnSegmentMergeFailureWhenDirectoryCl
MockDirectoryWrapper wrapper = newMockDirectory();
final CountDownLatch cleanupCompleted = new CountDownLatch(1);
MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() {
public boolean didFail1;
public boolean didFail2;

@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
if (!doFail) {
return;
}
if (callStackContainsAnyOf("mergeTerms") && !didFail1) {
didFail1 = true;
throw new IOException("No space left on device");
}
if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) {
didFail2 = true;
if (callStackContainsAnyOf("mergeTerms")) {
throw new IOException("No space left on device");
}
}
Expand Down Expand Up @@ -3537,7 +3502,6 @@ public void onFailedEngine(String reason, Exception e) {
segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));

fail.setDoFail();
// Close the store so that unreferenced file cleanup will fail.
store.close();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -362,6 +362,7 @@ public void testPrimaryRestart() throws Exception {
* prevent FileAlreadyExistsException. It does so by only copying files in first round of segment replication without
* committing locally so that in next round of segment replication those files are not considered for download again
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885")
public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception {
try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) {
shards.startAll();
Expand Down Expand Up @@ -453,6 +454,7 @@ public void onReplicationFailure(
* blocking update of reader. Once this is done, it corrupts one segment file and ensure that file is deleted in next
* round of segment replication by ensuring doc count.
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885")
public void testNoFailuresOnFileReads() throws Exception {
try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) {
shards.startAll();
Expand Down

0 comments on commit f22f628

Please sign in to comment.