diff --git a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
index 8dee39f7050cb..49e0ae0587085 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
@@ -48,7 +48,7 @@ public final class NoOpEngine extends ReadOnlyEngine {
     public NoOpEngine(EngineConfig config) {
         this(
             config,
-            config.isPromotableToPrimary() ? null : new TranslogStats(0, 0, 0, 0, 0),
+            config.isPromotableToPrimary() && config.getTranslogConfig().hasTranslog() ? null : new TranslogStats(0, 0, 0, 0, 0),
             config.isPromotableToPrimary()
                 ? null
                 : new SeqNoStats(
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index c1d11223fa55e..1d032c1f400ef 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -98,7 +98,7 @@ public class ReadOnlyEngine extends Engine {
     public ReadOnlyEngine(
         EngineConfig config,
         SeqNoStats seqNoStats,
-        TranslogStats translogStats,
+        @Nullable TranslogStats translogStats,
         boolean obtainLock,
         Function<DirectoryReader, DirectoryReader> readerWrapperFunction,
         boolean requireCompleteHistory,
@@ -251,6 +251,7 @@ private static SeqNoStats buildSeqNoStats(EngineConfig config, SegmentInfos info
     }

     private static TranslogStats translogStats(final EngineConfig config, final SegmentInfos infos) throws IOException {
+        assert config.getTranslogConfig().hasTranslog();
         final String translogUuid = infos.getUserData().get(Translog.TRANSLOG_UUID_KEY);
         if (translogUuid == null) {
             throw new IllegalStateException("commit doesn't contain translog unique id");
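Note: the two engine changes above work together. NoOpEngine now pre-computes empty TranslogStats whenever the shard either cannot be promoted to primary or has no translog at all, and ReadOnlyEngine accepts null to mean "read the real stats from the translog later". A minimal restatement of the new condition as a hypothetical helper (not part of the patch; the TranslogStats constructor arguments are the same empty values used above):

    static TranslogStats precomputedTranslogStats(EngineConfig config) {
        // null => ReadOnlyEngine.translogStats(config, infos) later reads real stats from the
        // translog, a path that the new assert there now guards with hasTranslog()
        return config.isPromotableToPrimary() && config.getTranslogConfig().hasTranslog()
            ? null
            : new TranslogStats(0, 0, 0, 0, 0); // nothing to read: report empty stats
    }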
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index a76feff84e61b..966764d2797c9 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -1487,6 +1487,27 @@ public void flush(FlushRequest request, ActionListener<Boolean> listener) {
         });
     }

+    /**
+     * @return {@code true} if the shard has a translog.
+     */
+    public boolean hasTranslog() {
+        return translogConfig.hasTranslog();
+    }
+
+    /**
+     * Reads the global checkpoint from the translog checkpoint file if the shard has a translog. Otherwise, reads the local checkpoint from
+     * the provided commit user data.
+     *
+     * @return the global checkpoint to use for recovery
+     * @throws IOException if reading the translog checkpoint file fails
+     */
+    public long readGlobalCheckpointForRecovery(Map<String, String> commitUserData) throws IOException {
+        if (hasTranslog()) {
+            return Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), commitUserData.get(Translog.TRANSLOG_UUID_KEY));
+        }
+        return Long.parseLong(commitUserData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
+    }
+
     /**
      * checks and removes translog files that no longer need to be retained. See
      * {@link org.elasticsearch.index.translog.TranslogDeletionPolicy} for details
@@ -1859,8 +1880,7 @@ public void recoverLocallyUpToGlobalCheckpoint(ActionListener<Long> recoveryStar
         }
         assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
         try {
-            final var translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
-            final var globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
+            final var globalCheckpoint = readGlobalCheckpointForRecovery(store.readLastCommittedSegmentsInfo().getUserData());
             final var safeCommit = store.findSafeIndexCommit(globalCheckpoint);
             ActionListener.run(recoveryStartingSeqNoListener.delegateResponse((l, e) -> {
                 logger.debug(() -> format("failed to recover shard locally up to global checkpoint %s", globalCheckpoint), e);
@@ -2084,8 +2104,7 @@ private void loadGlobalCheckpointToReplicationTracker() throws IOException {
             // we have to set it before we open an engine and recover from the translog because
             // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in,
             // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in.
-            final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
-            final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
+            final long globalCheckpoint = readGlobalCheckpointForRecovery(store.readLastCommittedSegmentsInfo().getUserData());
             replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint");
         } else {
             replicationTracker.updateGlobalCheckpointOnReplica(globalCheckPointIfUnpromotable, "from CleanFilesRequest");
@@ -2162,7 +2181,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t
         // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
         // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
         onSettingsChanged();
-        assert assertSequenceNumbersInCommit();
+        assert assertLastestCommitUserData();
         recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
         checkAndCallWaitForEngineOrClosedShardListeners();
     }
@@ -2183,9 +2202,13 @@ private Engine createEngine(EngineConfig config) {
         }
     }

-    private boolean assertSequenceNumbersInCommit() throws IOException {
+    /**
+     * Asserts that the latest Lucene commit contains the expected information about sequence numbers and the ES version.
+     */
+    private boolean assertLastestCommitUserData() throws IOException {
         final SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(store.directory());
         final Map<String, String> userData = segmentCommitInfos.getUserData();
+        // Ensure sequence numbers are present in the commit's user data
         assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint";
         assert userData.containsKey(SequenceNumbers.MAX_SEQ_NO) : "commit point doesn't contains a maximum sequence number";
         assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contains a history uuid";
@@ -2195,10 +2218,16 @@ private boolean assertSequenceNumbersInCommit() throws IOException {
             + "] is different than engine ["
             + getHistoryUUID()
             + "]";
+
         assert userData.containsKey(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) : "opening index which was created post 5.5.0 but "
             + Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID
             + " is not found in commit";
+
+        // From 7.16.0, the ES version is included in the Lucene commit user data as well as in the snapshot metadata in the repository.
+        // This is used during primary failover to detect whether the latest snapshot can be used to recover the new primary, because the
+        // failed primary may have created new segments in a more recent Lucene version, which may later have been snapshotted; such
+        // snapshotted files cannot be recovered on a node with an older Lucene version. Note that for versions <= 7.15 this assertion
+        // relies on the previous minor having a different Lucene version.
         final org.apache.lucene.util.Version commitLuceneVersion = segmentCommitInfos.getCommitLuceneVersion();
-        // This relies in the previous minor having another lucene version
         assert commitLuceneVersion.onOrAfter(RecoverySettings.SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION.luceneVersion()) == false
             || userData.containsKey(Engine.ES_VERSION) && Engine.readIndexVersion(userData.get(Engine.ES_VERSION)).onOrBefore(IndexVersion.current())
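Note: for review, this is the call-site pattern readGlobalCheckpointForRecovery replaces. The same two-line sequence appeared twice in this file and once more in PeerRecoveryTargetService below:

    // Before:
    //   final var translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
    //   final var globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
    // After, also correct for translog-less shards, which fall back to the commit's local checkpoint:
    final var globalCheckpoint = readGlobalCheckpointForRecovery(store.readLastCommittedSegmentsInfo().getUserData());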
diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index 42f62cf86545b..06f9b3e6c8943 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -315,7 +315,7 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A
             RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
             assert recoveryType == RecoverySource.Type.SNAPSHOT : "expected snapshot recovery type: " + recoveryType;
             SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource();
-            restore(indexShard, repository, recoverySource, recoveryListener(indexShard, listener).map(ignored -> true));
+            recoverFromRepository(indexShard, repository, recoverySource, recoveryListener(indexShard, listener).map(ignored -> true));
         } else {
             listener.onResponse(false);
         }
@@ -459,7 +459,7 @@ private void internalRecoverFromStore(IndexShard indexShard, ActionListener
         outerListener
     ) {
+        assert indexShard.shardRouting.primary() : "only primary shards can recover from snapshot";
         logger.debug("restoring from {} ...", indexShard.recoveryState().getRecoverySource());

         record ShardAndIndexIds(IndexId indexId, ShardId shardId) {}
@@ -538,13 +539,13 @@ record ShardAndIndexIds(IndexId indexId, ShardId shardId) {}
             .newForked(indexShard::preRecovery)
             .andThen(shardAndIndexIdsListener -> {
-                final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog();
                 if (restoreSource == null) {
                     throw new IndexShardRestoreFailedException(shardId, "empty restore source");
                 }
                 if (logger.isTraceEnabled()) {
                     logger.trace("[{}] restoring shard [{}]", restoreSource.snapshot(), shardId);
                 }
+                final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog();
                 translogState.totalOperations(0);
                 translogState.totalOperationsOnStart(0);
                 indexShard.prepareForIndexRecovery();
@@ -588,9 +589,7 @@ record ShardAndIndexIds(IndexId indexId, ShardId shardId) {}
             .andThen(l -> {
                 indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard);
-                final Store store = indexShard.store();
-                bootstrap(indexShard, store);
-                assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
+                bootstrap(indexShard);
                 writeEmptyRetentionLeasesFile(indexShard);
                 indexShard.openEngineAndRecoverFromTranslog(l);
             })
@@ -610,19 +609,37 @@ record ShardAndIndexIds(IndexId indexId, ShardId shardId) {}
         }));
     }

+    /**
+     * @deprecated use {@link #bootstrap(IndexShard)} instead
+     */
+    @Deprecated(forRemoval = true)
     public static void bootstrap(final IndexShard indexShard, final Store store) throws IOException {
-        if (indexShard.indexSettings.getIndexMetadata().isSearchableSnapshot() == false) {
-            // not bootstrapping new history for searchable snapshots (which are read-only) allows sequence-number based peer recoveries
+        assert indexShard.store() == store;
+        bootstrap(indexShard);
+    }
+
+    private static void bootstrap(final IndexShard indexShard) throws IOException {
+        assert indexShard.routingEntry().primary();
+        final var store = indexShard.store();
+        store.incRef();
+        try {
+            final var translogLocation = indexShard.shardPath().resolveTranslog();
+            if (indexShard.hasTranslog() == false) {
+                Translog.deleteAll(translogLocation);
+                return;
+            }
             store.bootstrapNewHistory();
+            final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
+            final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
+            final String translogUUID = Translog.createEmptyTranslog(
+                translogLocation,
+                localCheckpoint,
+                indexShard.shardId(),
+                indexShard.getPendingPrimaryTerm()
+            );
+            store.associateIndexWithNewTranslog(translogUUID);
+        } finally {
+            store.decRef();
         }
-        final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
-        final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
-        final String translogUUID = Translog.createEmptyTranslog(
-            indexShard.shardPath().resolveTranslog(),
-            localCheckpoint,
-            indexShard.shardId(),
-            indexShard.getPendingPrimaryTerm()
-        );
-        store.associateIndexWithNewTranslog(translogUUID);
     }
 }
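Note: a hypothetical sketch of the post-condition the new private bootstrap(IndexShard) establishes on disk. Translog.CHECKPOINT_FILE_NAME is the real constant for the translog.ckp file; the surrounding scaffolding (indexShard in scope, assertions enabled) is assumed:

    Path translogPath = indexShard.shardPath().resolveTranslog();
    if (indexShard.hasTranslog()) {
        // history was bootstrapped and a fresh empty translog associated with the last commit
        String uuid = indexShard.store().readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
        assert uuid != null && Files.exists(translogPath.resolve(Translog.CHECKPOINT_FILE_NAME));
    } else {
        // searchable snapshot shard: any leftover translog directory has been removed
        assert Files.notExists(translogPath);
    }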
diff --git a/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java b/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java
index 166f0eadc62b4..033859bd62128 100644
--- a/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java
+++ b/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java
@@ -10,6 +10,7 @@
 package org.elasticsearch.index.store;

 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.elasticsearch.common.lucene.store.FilterIndexOutput;
@@ -19,7 +20,7 @@
 import java.io.IOException;
 import java.io.UncheckedIOException;

-final class ByteSizeCachingDirectory extends ByteSizeDirectory {
+public final class ByteSizeCachingDirectory extends ByteSizeDirectory {

     private static class SizeAndModCount {
         final long size;
@@ -174,9 +175,29 @@ public void deleteFile(String name) throws IOException {
         try {
             super.deleteFile(name);
         } finally {
-            synchronized (this) {
-                modCount++;
+            markEstimatedSizeAsStale();
+        }
+    }
+
+    /**
+     * Mark the cached size as stale so that it is guaranteed to be refreshed the next time it is read.
+     */
+    public void markEstimatedSizeAsStale() {
+        synchronized (this) {
+            modCount++;
+        }
+    }
+
+    public static ByteSizeCachingDirectory unwrapDirectory(Directory dir) {
+        while (dir != null) {
+            if (dir instanceof ByteSizeCachingDirectory) {
+                return (ByteSizeCachingDirectory) dir;
+            } else if (dir instanceof FilterDirectory) {
+                dir = ((FilterDirectory) dir).getDelegate();
+            } else {
+                dir = null;
             }
         }
+        return null;
     }
 }
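Note: a usage sketch for the two new public entry points, mirroring what the searchable-snapshots listener at the end of this patch does:

    var cachingDir = ByteSizeCachingDirectory.unwrapDirectory(indexShard.store().directory());
    if (cachingDir != null) { // null when no ByteSizeCachingDirectory is in the FilterDirectory chain
        cachingDir.markEstimatedSizeAsStale(); // next size estimate recomputes instead of reusing the cache
    }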
diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java
index e6b499c07f189..322064f09cf77 100644
--- a/server/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/server/src/main/java/org/elasticsearch/index/store/Store.java
@@ -1449,6 +1449,7 @@ public void bootstrapNewHistory() throws IOException {
      * @see SequenceNumbers#MAX_SEQ_NO
      */
     public void bootstrapNewHistory(long localCheckpoint, long maxSeqNo) throws IOException {
+        assert indexSettings.getIndexMetadata().isSearchableSnapshot() == false;
         metadataLock.writeLock().lock();
         try (IndexWriter writer = newTemporaryAppendingIndexWriter(directory, null)) {
             final Map<String, String> map = new HashMap<>();
@@ -1572,6 +1573,7 @@ private IndexWriter newTemporaryEmptyIndexWriter(final Directory dir, final Vers
     }

     private IndexWriterConfig newTemporaryIndexWriterConfig() {
+        assert indexSettings.getIndexMetadata().isSearchableSnapshot() == false;
         // this config is only used for temporary IndexWriter instances, used to initialize the index or update the commit data,
         // so we don't want any merges to happen
         var iwc = indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD).setCommitOnClose(false);
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index 2d3e2d8c20256..75f38ed5f7342 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -220,6 +220,10 @@ public Translog(
         }
     }

+    public static void deleteAll(Path translogLocation) throws IOException {
+        IOUtils.rm(translogLocation);
+    }
+
     /** recover all translog files found on disk */
     private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException {
         boolean success = false;
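Note: Translog.deleteAll is deliberately a thin wrapper. IOUtils.rm deletes the given path and everything beneath it, and throws IOException on failure instead of silently ignoring it. A usage sketch matching the call sites added elsewhere in this patch (indexShard assumed in scope):

    // remove all translog files of a shard that must not have a translog
    Translog.deleteAll(indexShard.shardPath().resolveTranslog());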
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java
index 66018092089ce..280e319335b12 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java
@@ -143,4 +143,13 @@ public OperationListener getOperationListener() {
     public boolean fsync() {
         return fsync;
     }
+
+    /**
+     * @return {@code true} if the configuration allows the translog files to exist, {@code false} otherwise. If there is no translog,
+     * the shard is not writeable.
+     */
+    public boolean hasTranslog() {
+        // Expect no translog files to exist for searchable snapshots
+        return false == indexSettings.getIndexMetadata().isSearchableSnapshot();
+    }
 }
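Note: the three hasTranslog layers introduced by this patch delegate downwards, and today the only translog-less shards are searchable snapshot mounts, so the predicate is a plain negation. A one-line sketch of the equivalence:

    // IndexShard.hasTranslog() -> TranslogConfig.hasTranslog() -> !isSearchableSnapshot()
    assert indexShard.hasTranslog() == (indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false);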
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
index c8d31d2060caf..d069717a66ad0 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
@@ -49,9 +49,7 @@
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardLongFieldRange;
 import org.elasticsearch.index.shard.ShardNotFoundException;
-import org.elasticsearch.index.shard.StoreRecovery;
 import org.elasticsearch.index.store.Store;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogCorruptedException;
 import org.elasticsearch.indices.recovery.RecoveriesCollection.RecoveryRef;
 import org.elasticsearch.tasks.Task;
@@ -385,15 +383,8 @@ record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, Str
             logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
             indexShard.prepareForIndexRecovery();
             if (indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot()) {
-                // for searchable snapshots, peer recovery is treated similarly to recovery from snapshot
+                // for archive indices mounted as searchable snapshots, we still need to call this
                 indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard);
-                final Store store = indexShard.store();
-                store.incRef();
-                try {
-                    StoreRecovery.bootstrap(indexShard, store);
-                } finally {
-                    store.decRef();
-                }
             }
             indexShard.recoverLocallyUpToGlobalCheckpoint(ActionListener.assertOnce(l));
         })
@@ -488,8 +479,8 @@ public static StartRecoveryRequest getStartRecoveryRequest(
             // Make sure that the current translog is consistent with the Lucene index; otherwise, we have to throw away the Lucene
             // index.
             try {
-                final String expectedTranslogUUID = metadataSnapshot.commitUserData().get(Translog.TRANSLOG_UUID_KEY);
-                final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation(), expectedTranslogUUID);
+                final long globalCheckpoint = recoveryTarget.indexShard()
+                    .readGlobalCheckpointForRecovery(metadataSnapshot.commitUserData());
                 assert globalCheckpoint + 1 >= startingSeqNo : "invalid startingSeqNo " + startingSeqNo + " >= " + globalCheckpoint;
             } catch (IOException | TranslogCorruptedException e) {
                 logGlobalCheckpointWarning(logger, startingSeqNo, e);
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index ea485a411143e..362a62c838e3b 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -45,7 +45,6 @@
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.nio.file.Path;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -516,13 +515,7 @@ public void cleanFiles(
         try {
             if (indexShard.routingEntry().isPromotableToPrimary()) {
                 store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetadata);
-                final String translogUUID = Translog.createEmptyTranslog(
-                    indexShard.shardPath().resolveTranslog(),
-                    globalCheckpoint,
-                    shardId,
-                    indexShard.getPendingPrimaryTerm()
-                );
-                store.associateIndexWithNewTranslog(translogUUID);
+                bootstrap(indexShard, globalCheckpoint);
             } else {
                 indexShard.setGlobalCheckpointIfUnpromotable(globalCheckpoint);
             }
@@ -634,7 +627,33 @@ public String getTempNameForFile(String origFile) {
         return multiFileWriter.getTempNameForFile(origFile);
     }

-    Path translogLocation() {
-        return indexShard().shardPath().resolveTranslog();
+    private static void bootstrap(final IndexShard indexShard, long globalCheckpoint) throws IOException {
+        assert indexShard.routingEntry().isPromotableToPrimary();
+        final var store = indexShard.store();
+        store.incRef();
+        try {
+            final var translogLocation = indexShard.shardPath().resolveTranslog();
+            if (indexShard.hasTranslog() == false) {
+                if (Assertions.ENABLED) {
+                    if (indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot()) {
+                        long localCheckpoint = Long.parseLong(
+                            store.readLastCommittedSegmentsInfo().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)
+                        );
+                        assert localCheckpoint == globalCheckpoint : localCheckpoint + " != " + globalCheckpoint;
+                    }
+                }
+                Translog.deleteAll(translogLocation);
+                return;
+            }
+            final String translogUUID = Translog.createEmptyTranslog(
+                indexShard.shardPath().resolveTranslog(),
+                globalCheckpoint,
+                indexShard.shardId(),
+                indexShard.getPendingPrimaryTerm()
+            );
+            store.associateIndexWithNewTranslog(translogUUID);
+        } finally {
+            store.decRef();
+        }
     }
 }
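Note: the Assertions.ENABLED guard in the new bootstrap keeps the extra readLastCommittedSegmentsInfo() read off the production path; org.elasticsearch.core.Assertions captures whether the JVM runs with -ea, which is how the test suites execute. The idiom in isolation:

    if (Assertions.ENABLED) {
        // invariant checks that are too expensive to run in production, e.g. re-reading the
        // last commit to verify localCheckpoint == globalCheckpoint for searchable snapshots
    }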
diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java
index 42fe09691d249..e36ae4994c872 100644
--- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java
+++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java
@@ -27,6 +27,7 @@
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.engine.ReadOnlyEngine;
@@ -34,6 +35,7 @@
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.license.License;
 import org.elasticsearch.license.LicenseUtils;
 import org.elasticsearch.license.LicensedFeature;
@@ -201,6 +203,12 @@ private static SegmentInfos convertToNewerLuceneVersion(OldSegmentInfos oldSegme
         if (map.containsKey(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) == false) {
             map.put(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1");
         }
+        if (map.containsKey(Engine.ES_VERSION) == false) {
+            assert oldSegmentInfos.getLuceneVersion()
+                .onOrAfter(RecoverySettings.SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION.luceneVersion()) == false
+                : oldSegmentInfos.getLuceneVersion() + " should contain the ES_VERSION";
+            map.put(Engine.ES_VERSION, IndexVersions.MINIMUM_COMPATIBLE.toString());
+        }
         segmentInfos.setUserData(map, false);
         for (SegmentCommitInfo infoPerCommit : oldSegmentInfos.asList()) {
             final SegmentInfo newInfo = BWCCodec.wrap(infoPerCommit.info);
diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldSegmentInfos.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldSegmentInfos.java
index 18adebb145f98..b2af41653da61 100644
--- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldSegmentInfos.java
+++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldSegmentInfos.java
@@ -564,6 +564,10 @@ public long getLastGeneration() {
         return lastGeneration;
     }

+    public Version getLuceneVersion() {
+        return luceneVersion;
+    }
+
     /**
      * Prints the given message to the infoStream. Note, this method does not check for null
      * infoStream. It assumes this check has been performed by the caller, which is recommended to
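Note: the net effect of the ES_VERSION backfill for readers of commit user data, sketched with Engine.readIndexVersion and Engine.ES_VERSION as used in assertLastestCommitUserData above. After conversion, archive indices always carry an ES version: either the recorded one, or the oldest still-readable version for writes predating 7.16.0.

    // never null once convertToNewerLuceneVersion has run
    IndexVersion esVersion = Engine.readIndexVersion(segmentInfos.getUserData().get(Engine.ES_VERSION));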
"Index file should not exist" : "Index file should exist", indexExists, - not(snapshotDirectory) + not(isSearchableSnapshot) ); - assertThat("Translog should exist", translogExists, is(true)); - try (Stream dir = Files.list(shardPath.resolveTranslog())) { - final long translogFiles = dir.filter(path -> path.getFileName().toString().contains("translog")).count(); - if (snapshotDirectory) { - assertThat("There should be 2 translog files for a snapshot directory", translogFiles, equalTo(2L)); - } else { + if (isSearchableSnapshot) { + assertThat("Translog should not exist", translogExists, equalTo(false)); + } else { + assertThat("Translog should exist", translogExists, equalTo(true)); + try (Stream dir = Files.list(shardPath.resolveTranslog())) { + final long translogFiles = dir.filter(path -> path.getFileName().toString().contains("translog")).count(); assertThat( "There should be 2+ translog files for a non-snapshot directory", translogFiles, diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index 67d9d7a82acf3..2797202e5f24e 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -59,7 +59,6 @@ import java.time.ZoneId; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -169,12 +168,9 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { logger.info("--> restoring partial index [{}] with cache enabled", restoredIndexName); Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true); - final List nonCachedExtensions; if (randomBoolean()) { - nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim")); + var nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim")); indexSettingsBuilder.putList(SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.getKey(), nonCachedExtensions); - } else { - nonCachedExtensions = Collections.emptyList(); } if (randomBoolean()) { indexSettingsBuilder.put( @@ -264,8 +260,6 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { final long originalSize = snapshotShards.get(shardRouting.getId()).getStats().getTotalSize(); totalExpectedSize += originalSize; - // an extra segments_N file is created for bootstrapping new history and associating translog. We can extract the size of this - // extra file but we have to unwrap the in-memory directory first. 
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java
index 67d9d7a82acf3..2797202e5f24e 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java
@@ -59,7 +59,6 @@
 import java.time.ZoneId;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -169,12 +168,9 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception {
         logger.info("--> restoring partial index [{}] with cache enabled", restoredIndexName);
         Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true);
-        final List<String> nonCachedExtensions;
         if (randomBoolean()) {
-            nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim"));
+            var nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim"));
             indexSettingsBuilder.putList(SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.getKey(), nonCachedExtensions);
-        } else {
-            nonCachedExtensions = Collections.emptyList();
         }
         if (randomBoolean()) {
             indexSettingsBuilder.put(
@@ -264,8 +260,6 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception {
             final long originalSize = snapshotShards.get(shardRouting.getId()).getStats().getTotalSize();
             totalExpectedSize += originalSize;

-            // an extra segments_N file is created for bootstrapping new history and associating translog. We can extract the size of this
-            // extra file but we have to unwrap the in-memory directory first.
             final Directory unwrappedDir = FilterDirectory.unwrap(
                 internalCluster().getInstance(IndicesService.class, getDiscoveryNodes().resolveNode(shardRouting.currentNodeId()).getName())
                     .indexServiceSafe(shardRouting.index())
@@ -277,7 +271,7 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception {
             assertThat(shardRouting.toString(), unwrappedDir, instanceOf(ByteBuffersDirectory.class));
             final ByteBuffersDirectory inMemoryDir = (ByteBuffersDirectory) unwrappedDir;

-            assertThat(inMemoryDir.listAll(), arrayWithSize(1));
+            assertThat(inMemoryDir.listAll(), arrayWithSize(0));

             assertThat(shardRouting.toString(), store.totalDataSetSizeInBytes(), equalTo(originalSize));
         }
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java
index cf0306e3e6ef2..4caf932a99807 100644
--- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotIndexEventListener.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.ByteSizeCachingDirectory;
 import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots;
@@ -61,6 +62,12 @@ public SearchableSnapshotIndexEventListener(
     public void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings, ActionListener<Void> listener) {
         assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC);
         ensureSnapshotIsLoaded(indexShard);
+        var sizeCachingDirectory = ByteSizeCachingDirectory.unwrapDirectory(indexShard.store().directory());
+        if (sizeCachingDirectory != null) {
+            // Mark the cached estimation of the directory size as stale in ByteSizeCachingDirectory, since we just loaded the snapshot
+            // file list into the searchable snapshot directory.
+            sizeCachingDirectory.markEstimatedSizeAsStale();
+        }
         listener.onResponse(null);
     }