From 8bc4f8a77dc355e946e77fb15b990819d9abe536 Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 26 Sep 2023 16:49:41 +0530 Subject: [PATCH] Enhance debug, trace & info logs for remote store flows (#10182) * Enhance debug, trace & info logs for remote store flows Signed-off-by: Ashish Singh * Add shardId to logger and more logs Signed-off-by: Ashish Singh * Rename unknown index name to _unknown_ Signed-off-by: Ashish Singh * Empty-Commit Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- .../java/org/opensearch/core/index/Index.java | 1 + .../remote/RemoteSegmentTransferTracker.java | 5 +++ .../CloseableRetryableRefreshListener.java | 5 ++- .../opensearch/index/shard/IndexShard.java | 1 + .../shard/RemoteStoreRefreshListener.java | 18 ++++---- .../opensearch/index/shard/StoreRecovery.java | 2 +- .../store/RemoteSegmentStoreDirectory.java | 32 +++++++++++---- .../RemoteSegmentStoreDirectoryFactory.java | 13 +++--- .../translog/InternalTranslogManager.java | 5 ++- .../index/translog/RemoteFsTranslog.java | 12 ++++-- .../transfer/TranslogTransferManager.java | 3 +- .../LocalStorePeerRecoverySourceHandler.java | 3 ++ .../recovery/PeerRecoveryTargetService.java | 6 +-- .../recovery/RecoverySourceHandler.java | 41 ++++++++++++++----- .../RemoteStorePeerRecoverySourceHandler.java | 2 + .../RemoteStoreReplicationSource.java | 4 +- .../SegmentReplicationTargetService.java | 11 ++--- .../checkpoint/PublishCheckpointAction.java | 4 +- .../blobstore/BlobStoreRepository.java | 13 +++++- .../RemoteStoreRefreshListenerTests.java | 3 +- .../RemoteSegmentStoreDirectoryTests.java | 20 +++++---- .../index/shard/IndexShardTestCase.java | 2 +- 22 files changed, 142 insertions(+), 64 deletions(-) diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index 77cc628213df9..b5ee482bcd952 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -59,6 +59,7 @@ public class Index implements Writeable, ToXContentObject { public static final Index[] EMPTY_ARRAY = new Index[0]; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String INDEX_NAME_KEY = "index_name"; + public static final String UNKNOWN_INDEX_NAME = "_unknown_"; private static final ObjectParser<Builder, Void> INDEX_PARSER = new ObjectParser<>("index", Builder::new); static { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index d18462cd65fc1..05081180bb179 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -294,6 +294,11 @@ public void updateLatestLocalFileNameLengthMap( Collection<String> segmentFiles, CheckedFunction<String, Long, IOException> fileSizeFunction ) { + logger.debug( "segmentFilesPostRefresh={} latestLocalFileNamesBeforeMapUpdate={}", segmentFiles, latestLocalFileNameLengthMap.keySet() ); // Update the map segmentFiles.stream() .filter(file -> EXCLUDE_FILES.contains(file) == false) diff --git a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java index 9bc105bf13f0a..3ee74e5267718 100644 --- a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java +++
b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java @@ -104,6 +104,7 @@ protected TimeValue getNextRetryInterval() { private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boolean didRefresh) { // If the underlying listener has closed, then we do not allow even the retry to be scheduled if (closed.get() || isRetryEnabled() == false) { + getLogger().debug("skip retry on closed={} isRetryEnabled={}", closed.get(), isRetryEnabled()); return; } @@ -112,6 +113,7 @@ private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boole // If the retryScheduled is already true, then we return from here itself. If not, then we proceed with scheduling // the retry. if (retryScheduled.getAndSet(true)) { + getLogger().debug("skip retry on retryScheduled=true"); return; } @@ -188,7 +190,7 @@ public final void close() throws IOException { if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES)) { boolean result = closed.compareAndSet(false, true); assert result && semaphore.availablePermits() == 0; - getLogger().info("Closed"); + getLogger().info("All permits are acquired and refresh listener is closed"); } else { throw new TimeoutException("timeout while closing gated refresh listener"); } @@ -200,7 +202,6 @@ public final void close() throws IOException { protected abstract Logger getLogger(); // Visible for testing - /** * Returns if the retry is scheduled or not. * diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index bc2bfc93d0116..493c63e5d46f8 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3519,6 +3519,7 @@ public void startRecovery( // } // }} // } + logger.debug("startRecovery type={}", recoveryState.getRecoverySource().getType()); assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource()); switch (recoveryState.getRecoverySource().getType()) { case EMPTY_STORE: diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 792671304bbff..c554b18dde5e3 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -127,6 +127,7 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { segmentTracker.updateLocalRefreshTimeAndSeqNo(); try { if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + logger.debug("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); this.primaryTerm = indexShard.getOperationPrimaryTerm(); this.remoteDirectory.init(); } @@ -150,8 +151,6 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { boolean successful; - // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it - // is important to upload the zero state segments so that the restore does not break. 
if (shouldSync(didRefresh)) { successful = syncSegments(); } else { @@ -161,6 +160,8 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { } private boolean shouldSync(boolean didRefresh) { + // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it + // is important to upload the zero state segments so that the restore does not break. return this.primaryTerm != indexShard.getOperationPrimaryTerm() || didRefresh || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty(); @@ -168,7 +169,7 @@ private boolean shouldSync(boolean didRefresh) { private boolean syncSegments() { if (indexShard.getReplicationTracker().isPrimaryMode() == false || indexShard.state() == IndexShardState.CLOSED) { - logger.trace( + logger.debug( "Skipped syncing segments with primaryMode={} indexShardState={}", indexShard.getReplicationTracker().isPrimaryMode(), indexShard.state() @@ -178,10 +179,7 @@ private boolean syncSegments() { // primaryMode to true. Due to this, the refresh that is triggered post replay of translog will not go through // if following condition does not exist. The segments created as part of translog replay will not be present // in the remote store. - if (indexShard.state() == IndexShardState.STARTED && indexShard.getEngine() instanceof InternalEngine) { - return false; - } - return true; + return indexShard.state() != IndexShardState.STARTED || !(indexShard.getEngine() instanceof InternalEngine); } ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); beforeSegmentsSync(); @@ -217,8 +215,10 @@ private boolean syncSegments() { @Override public void onResponse(Void unused) { try { + logger.debug("New segments upload successful"); // Start metadata file upload uploadMetadata(localSegmentsPostRefresh, segmentInfos, checkpoint); + logger.debug("Metadata upload successful"); clearStaleFilesFromLocalSegmentChecksumMap(localSegmentsPostRefresh); onSuccessfulSegmentsSync( refreshTimeMs, @@ -262,6 +262,7 @@ public void onFailure(Exception e) { updateFinalStatusInSegmentTracker(successful.get(), bytesBeforeUpload, startTimeInNS); // If there are failures in uploading segments, then we should retry as search idle can lead to // refresh not occurring until write happens. 
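// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this change: syncSegments() above hands the
// upload work to a completion listener, and the uploadNewSegments() hunk just
// below fans that listener out over one async upload per segment file via
// GroupedActionListener. A minimal, self-contained version of that fan-in idiom
// in plain Java; all names here are hypothetical stand-ins, not OpenSearch's
// actual GroupedActionListener API.
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

final class GroupedListenerSketch {
    /** Fires onAllDone once all expected callbacks succeed, or onError on the first failure. */
    static final class Grouped {
        private final AtomicInteger pending;
        private final AtomicInteger failures = new AtomicInteger();
        private final Runnable onAllDone;
        private final Consumer<Exception> onError;

        Grouped(int groupSize, Runnable onAllDone, Consumer<Exception> onError) {
            this.pending = new AtomicInteger(groupSize);
            this.onAllDone = onAllDone;
            this.onError = onError;
        }

        void onResponse() {
            // Complete the group only when every callback has arrived and none failed.
            if (pending.decrementAndGet() == 0 && failures.get() == 0) {
                onAllDone.run();
            }
        }

        void onFailure(Exception e) {
            if (failures.incrementAndGet() == 1) {
                onError.accept(e); // propagate only the first failure
            }
        }
    }

    public static void main(String[] args) {
        // Three simulated per-file upload completions, as in the batch upload above.
        Grouped grouped = new Grouped(
            3,
            () -> System.out.println("all segment uploads finished"),
            e -> System.out.println("upload failed: " + e.getMessage())
        );
        grouped.onResponse();
        grouped.onResponse();
        grouped.onResponse(); // prints: all segment uploads finished
    }
}
// ---------------------------------------------------------------------------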
+ logger.debug("syncSegments runStatus={}", successful.get()); return successful.get(); } @@ -300,6 +301,7 @@ private void onSuccessfulSegmentsSync( ((InternalEngine) indexShard.getEngine()).translogManager().setMinSeqNoToKeep(lastRefreshedCheckpoint + 1); // Publishing the new checkpoint which is used for remote store + segrep indexes checkpointPublisher.publish(indexShard, checkpoint); + logger.debug("onSuccessfulSegmentsSync lastRefreshedCheckpoint={} checkpoint={}", lastRefreshedCheckpoint, checkpoint); } /** @@ -353,10 +355,12 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se private void uploadNewSegments(Collection localSegmentsPostRefresh, ActionListener listener) { Collection filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); if (filteredFiles.size() == 0) { + logger.debug("No new segments to upload in uploadNewSegments"); listener.onResponse(null); return; } + logger.debug("Effective new segments files to upload {}", filteredFiles); ActionListener> mappedListener = ActionListener.map(listener, resp -> null); GroupedActionListener batchUploadListener = new GroupedActionListener<>(mappedListener, filteredFiles.size()); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 6d675b709e05b..f429d94f7f96c 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -399,7 +399,7 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - String.valueOf(shardId.id()) + shardId ); indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); final Store store = indexShard.store(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index a68fed5e302bd..b23d2d7d0a3f8 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -26,8 +26,10 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.io.VersionedCodecStreamWrapper; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -42,6 +44,7 @@ import java.io.InputStream; import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -99,7 +102,9 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement RemoteSegmentMetadata.METADATA_CODEC ); - private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + private static final Logger staticLogger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + + private final Logger 
logger; /** * AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time. @@ -113,13 +118,15 @@ public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, RemoteStoreLockManager mdLockManager, - ThreadPool threadPool + ThreadPool threadPool, + ShardId shardId ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -132,12 +139,14 @@ public RemoteSegmentStoreDirectory( * @throws IOException if there were any failures in reading the metadata file */ public RemoteSegmentMetadata init() throws IOException { + logger.debug("Start initialisation of remote segment metadata"); RemoteSegmentMetadata remoteSegmentMetadata = readLatestMetadataFile(); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); } else { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); } + logger.debug("Initialisation of remote segment metadata completed"); return remoteSegmentMetadata; } @@ -250,7 +259,7 @@ public static UploadedSegmentMetadata fromString(String uploadedFilename) { String[] values = uploadedFilename.split(SEPARATOR); UploadedSegmentMetadata metadata = new UploadedSegmentMetadata(values[0], values[1], values[2], Long.parseLong(values[3])); if (values.length < 5) { - logger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); + staticLogger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); } metadata.setWrittenByMajor(Integer.parseInt(values[4])); @@ -678,7 +687,7 @@ private Map getSegmentToLuceneVersion(Collection segmen */ private void tryAndDeleteLocalFile(String filename, Directory directory) { try { - logger.trace("Deleting file: " + filename); + logger.debug("Deleting file: " + filename); directory.deleteFile(filename); } catch (NoSuchFileException | FileNotFoundException e) { logger.trace("Exception while deleting. 
Missing file : " + filename, e); @@ -728,7 +737,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException Integer.MAX_VALUE ); if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { - logger.trace( + logger.debug( "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", sortedMetadataFileList.size(), lastNMetadataFilesToKeep @@ -756,6 +765,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException }).collect(Collectors.toList()); sortedMetadataFileList.removeAll(metadataFilesToBeDeleted); + logger.debug( + "metadataFilesEligibleToDelete={} metadataFilesToBeDeleted={}", + metadataFilesEligibleToDelete, + metadataFilesEligibleToDelete + ); Map activeSegmentFilesMetadataMap = new HashMap<>(); Set activeSegmentRemoteFilenames = new HashSet<>(); @@ -773,9 +787,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException .map(metadata -> metadata.uploadedFilename) .collect(Collectors.toSet()); AtomicBoolean deletionSuccessful = new AtomicBoolean(true); + List nonActiveDeletedSegmentFiles = new ArrayList<>(); staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { try { remoteDataDirectory.deleteFile(file); + nonActiveDeletedSegmentFiles.add(file); if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); } @@ -790,8 +806,9 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException ); } }); + logger.debug("nonActiveDeletedSegmentFiles={}", nonActiveDeletedSegmentFiles); if (deletionSuccessful.get()) { - logger.trace("Deleting stale metadata file {} from remote segment store", metadataFile); + logger.debug("Deleting stale metadata file {} from remote segment store", metadataFile); remoteMetadataDirectory.deleteFile(metadataFile); } } @@ -842,7 +859,7 @@ private boolean deleteIfEmpty() throws IOException { 1 ); if (metadataFiles.size() != 0) { - logger.info("Remote directory still has files , not deleting the path"); + logger.info("Remote directory still has files, not deleting the path"); return false; } @@ -858,6 +875,7 @@ private boolean deleteIfEmpty() throws IOException { return true; } + @Override public void close() throws IOException { deleteStaleSegmentsAsync(0, ActionListener.wrap(r -> deleteIfEmpty(), e -> logger.error("Failed to cleanup remote directory"))); } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 31fada7d5c7eb..a64b8536aa946 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -10,6 +10,7 @@ import org.apache.lucene.store.Directory; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; @@ -45,17 +46,15 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - String shardId = 
String.valueOf(path.getShardId().getId()); - - return newDirectory(repositoryName, indexUUID, shardId); + return newDirectory(repositoryName, indexUUID, path.getShardId()); } - public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException { + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); BlobPath commonBlobPath = blobStoreRepository.basePath(); - commonBlobPath = commonBlobPath.add(indexUUID).add(shardId).add(SEGMENTS); + commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS); RemoteDirectory dataDirectory = new RemoteDirectory( blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), @@ -69,10 +68,10 @@ public Directory newDirectory(String repositoryName, String indexUUID, String sh repositoriesService.get(), repositoryName, indexUUID, - shardId + String.valueOf(shardId.id()) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 7014bcabe5cfe..8c6bff591c1ec 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -8,10 +8,10 @@ package org.opensearch.index.translog; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; @@ -43,7 +43,7 @@ public class InternalTranslogManager implements TranslogManager, Closeable { private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false); private final TranslogEventListener translogEventListener; private final Supplier localCheckpointTrackerSupplier; - private static final Logger logger = LogManager.getLogger(InternalTranslogManager.class); + private final Logger logger; public AtomicBoolean getPendingTranslogRecovery() { return pendingTranslogRecovery; @@ -80,6 +80,7 @@ public InternalTranslogManager( assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; // don't allow commits until we are done with recovering pendingTranslogRecovery.set(true); + this.logger = Loggers.getLogger(getClass(), shardId); } /** diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 23903e5a9d110..004d1bfdca36d 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ 
b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -181,11 +181,12 @@ static void download(TranslogTransferManager translogTransferManager, Path locat ex = e; } } + logger.debug("Exhausted all download retries during translog/checkpoint file download"); throw ex; } static private void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { - logger.trace("Downloading translog files from remote"); + logger.debug("Downloading translog files from remote"); RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); long prevDownloadBytesSucceeded = statsTracker.getDownloadBytesSucceeded(); long prevDownloadTimeInMillis = statsTracker.getTotalDownloadTimeInMillis(); @@ -205,6 +206,11 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager String generation = Long.toString(i); translogTransferManager.downloadTranslog(generationToPrimaryTermMapper.get(generation), generation, location); } + logger.info( + "Downloaded translog and checkpoint files from={} to={}", + translogMetadata.getMinTranslogGeneration(), + translogMetadata.getGeneration() + ); statsTracker.recordDownloadStats(prevDownloadBytesSucceeded, prevDownloadTimeInMillis); @@ -215,7 +221,7 @@ static private void downloadOnce(TranslogTransferManager translogTransferManager location.resolve(Translog.CHECKPOINT_FILE_NAME) ); } - logger.trace("Downloaded translog files from remote"); + logger.debug("downloadOnce execution completed"); } public static TranslogTransferManager buildTranslogTransferManager( @@ -310,7 +316,7 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { // primary, the engine is reset to InternalEngine which also initialises the RemoteFsTranslog which in turns // downloads all the translogs from remote store and does a flush before the relocation finishes. 
if (primaryModeSupplier.getAsBoolean() == false) { - logger.trace("skipped uploading translog for {} {}", primaryTerm, generation); + logger.debug("skipped uploading translog for {} {}", primaryTerm, generation); // NO-OP return true; } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index fd4936603671c..d7e50cdcf32e4 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -227,7 +227,7 @@ private void captureStatsOnUploadFailure() { } public boolean downloadTranslog(String primaryTerm, String generation, Path location) throws IOException { - logger.info( + logger.trace( "Downloading translog files with: Primary Term = {}, Generation = {}, Location = {}", primaryTerm, generation, @@ -289,6 +289,7 @@ public TranslogTransferMetadata readMetadata() throws IOException { exceptionSetOnce.set(e); } finally { remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); + logger.debug("translogMetadataDownloadStatus={}", downloadStatus); if (downloadStatus) { remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java index a915046c381fc..ac6b2e6b77d18 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java @@ -161,6 +161,7 @@ && isTargetSameHistory() }, shardId + " removing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); deleteRetentionLeaseStep.whenComplete(ignored -> { + logger.debug("deleteRetentionLeaseStep completed"); assert Transports.assertNotTransportThread(this + "[phase1]"); phase1(wrappedSafeCommit.get(), startingSeqNo, () -> estimateNumOps, sendFileStep, false); }, onFailure); @@ -172,12 +173,14 @@ && isTargetSameHistory() assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); /* * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. 
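// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this change: the recovery handlers above and
// below add a debug line at each StepListener completion (deleteRetentionLeaseStep,
// sendFileStep, prepareEngineStep, and so on), so a stalled recovery shows exactly
// which step it last finished. The underlying shape is a chain of async steps
// whose completion callbacks first log, then trigger the next step. A plain-Java
// analogue using CompletableFuture; the step names are illustrative only.
import java.util.concurrent.CompletableFuture;

final class RecoveryStepChainSketch {
    public static void main(String[] args) {
        CompletableFuture<Void> sendFileStep = CompletableFuture.runAsync(
            () -> System.out.println("sending segment files to target...")
        );
        CompletableFuture<Void> prepareEngineStep = sendFileStep.thenRun(() -> {
            System.out.println("sendFileStep completed"); // the kind of debug line the patch adds
            System.out.println("preparing target engine for translog...");
        });
        prepareEngineStep
            .thenRun(() -> System.out.println("prepareEngineStep completed"))
            .join(); // wait so the demo prints before the JVM exits
    }
}
// ---------------------------------------------------------------------------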
diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index a8c8bef15c102..09ea0856fd8d5 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -265,7 +265,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi actionName = PeerRecoverySourceService.Actions.START_RECOVERY; } catch (final Exception e) { // this will be logged as warning later on... - logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + logger.debug("unexpected error while preparing shard for peer recovery, failing recovery", e); onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), @@ -273,12 +273,12 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); return; } - logger.trace("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } else { startRequest = preExistingRequest; requestToSend = new ReestablishRecoveryRequest(recoveryId, startRequest.shardId(), startRequest.targetAllocationId()); actionName = PeerRecoverySourceService.Actions.REESTABLISH_RECOVERY; - logger.trace("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } } transportService.sendRequest( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 349ceb51d8173..7d7e2f6114129 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -202,9 +202,13 @@ protected void finalizeStepAndCompleteFuture( final StepListener finalizeStep = new StepListener<>(); // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2 final long trimAboveSeqNo = startingSeqNo - 1; - sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep), onFailure); + sendSnapshotStep.whenComplete(r -> { + logger.debug("sendSnapshotStep completed"); + finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep); + }, onFailure); finalizeStep.whenComplete(r -> { + logger.debug("finalizeStep completed"); final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result(); final SendFileResult sendFileResult = sendFileStep.result(); @@ -234,7 +238,10 @@ protected void onSendFileStepComplete( GatedCloseable wrappedSafeCommit, Releasable releaseStore ) { - sendFileStep.whenComplete(r -> IOUtils.close(wrappedSafeCommit, releaseStore), e -> { + sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); + IOUtils.close(wrappedSafeCommit, releaseStore); + }, e -> { try { IOUtils.close(wrappedSafeCommit, releaseStore); } catch (final IOException ex) { @@ -446,16 +453,22 @@ void phase1( sendFileInfoStep ); - sendFileInfoStep.whenComplete( - r -> 
sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep), - listener::onFailure - ); + sendFileInfoStep.whenComplete(r -> { + logger.debug("sendFileInfoStep completed"); + sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep); + }, listener::onFailure); // When doing peer recovery of remote store enabled replica, retention leases are not required. if (skipCreateRetentionLeaseStep) { - sendFilesStep.whenComplete(r -> createRetentionLeaseStep.onResponse(null), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLeaseStep.onResponse(null); + }, listener::onFailure); } else { - sendFilesStep.whenComplete(r -> createRetentionLease(startingSeqNo, createRetentionLeaseStep), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLease(startingSeqNo, createRetentionLeaseStep); + }, listener::onFailure); } createRetentionLeaseStep.whenComplete(retentionLease -> { @@ -472,6 +485,7 @@ void phase1( final long totalSize = totalSizeInBytes; final long existingTotalSize = existingTotalSizeInBytes; cleanFilesStep.whenComplete(r -> { + logger.debug("cleanFilesStep completed"); final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); listener.onResponse( @@ -542,7 +556,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, cloneRetentionLeaseStep, false) ); logger.trace("cloned primary's retention lease as [{}]", clonedLease); - cloneRetentionLeaseStep.whenComplete(rr -> listener.onResponse(clonedLease), listener::onFailure); + cloneRetentionLeaseStep.whenComplete(rr -> { + logger.debug("cloneRetentionLeaseStep completed"); + listener.onResponse(clonedLease); + }, listener::onFailure); } catch (RetentionLeaseNotFoundException e) { // it's possible that the primary has no retention lease yet if we are doing a rolling upgrade from a version before // 7.4, and in that case we just create a lease using the local checkpoint of the safe commit which we're using for @@ -556,7 +573,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, addRetentionLeaseStep, false) ); - addRetentionLeaseStep.whenComplete(rr -> listener.onResponse(newLease), listener::onFailure); + addRetentionLeaseStep.whenComplete(rr -> { + logger.debug("addRetentionLeaseStep completed"); + listener.onResponse(newLease); + }, listener::onFailure); logger.trace("created retention lease with estimated checkpoint of [{}]", estimatedGlobalCheckpoint); } }, shardId + " establishing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); @@ -812,6 +832,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis cancellableThreads.checkForCancel(); recoveryTarget.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, finalizeListener); finalizeListener.whenComplete(r -> { + logger.debug("finalizeListenerStep completed"); RunUnderPrimaryPermit.run( () -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint), shardId + " updating " + request.targetAllocationId() + "'s global checkpoint", diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java 
b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java index 5d6c0eb3bae05..66c7a3b48f28f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java @@ -80,12 +80,14 @@ protected void innerRecoveryToTarget(ActionListener listener, assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(0, prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); RunUnderPrimaryPermit.run( () -> shard.initiateTracking(request.targetAllocationId()), diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 64aacd68f2490..aeb690465905f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -105,7 +105,7 @@ public void getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); return; } - logger.trace("Downloading segments files from remote store {}", filesToFetch); + logger.debug("Downloading segment files from remote store {}", filesToFetch); RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); List toDownloadSegments = new ArrayList<>(); @@ -122,7 +122,7 @@ public void getSegmentFiles( toDownloadSegments.add(fileMetadata); } downloadSegments(storeDirectory, remoteDirectory, toDownloadSegments, shardPath, listener); - logger.trace("Downloaded segments from remote store {}", toDownloadSegments); + logger.debug("Downloaded segment files from remote store {}", toDownloadSegments); } finally { indexShard.store().decRef(); indexShard.remoteStore().decRef(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index c071b22ba4cba..ffc4ab86661db 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -208,7 +208,7 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { - logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); + logger.debug(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); // if the shard is in any state if (replicaShard.state().equals(IndexShardState.CLOSED)) { // ignore if shard is closed @@ -224,7 +224,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe 
SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId()); if (ongoingReplicationTarget != null) { if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Cancelling ongoing replication {} from old primary with primary term {}", ongoingReplicationTarget.description(), @@ -233,7 +233,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe ); ongoingReplicationTarget.cancel("Cancelling stuck target after new primary"); } else { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", ongoingReplicationTarget.getCheckpoint() @@ -247,7 +247,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe startReplication(replicaShard, receivedCheckpoint, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] [replication id {}] Replication complete to {}, timing data: {}", replicaShard.shardId().getId(), @@ -512,7 +512,7 @@ private void start(final long replicationId) { target.startReplication(new ActionListener<>() { @Override public void onResponse(Void o) { - logger.trace(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); + logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { completedReplications.put(target.shardId(), target); @@ -521,6 +521,7 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { + logger.debug("Replication failed {}", target.description()); if (e instanceof OpenSearchCorruptionException) { onGoingReplications.fail(replicationId, new ReplicationFailedException("Store corruption during replication", e), true); return; diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 76f0d333db977..11b01965c4af5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -137,7 +137,7 @@ public String executor() { @Override public void handleResponse(ReplicationResponse response) { timer.stop(); - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] Completed publishing checkpoint [{}], timing: {}", indexShard.shardId().getId(), @@ -152,7 +152,7 @@ public void handleResponse(ReplicationResponse response) { @Override public void handleException(TransportException e) { timer.stop(); - logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); + logger.debug("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); if (ExceptionsHelper.unwrap( diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java 
b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 8ff39f7e50d70..d5cdd9f68f443 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -100,6 +100,7 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.compress.NotXContentException; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.core.util.BytesRefUtils; @@ -1188,7 +1189,11 @@ private void executeStaleShardDelete( new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), threadPool - ).newDirectory(remoteStoreRepoForIndex, indexUUID, shardId).close(); + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardId)) + ).close(); } } } @@ -1654,7 +1659,11 @@ private void executeOneStaleIndexDelete( new RemoteSegmentStoreDirectoryFactory( remoteStoreLockManagerFactory.getRepositoriesService(), threadPool - ).newDirectory(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()).close(); + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardBlob.getKey())) + ).close(); } } } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index c713ccdddd66a..415efd4ac23b6 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -152,7 +152,8 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { mock(RemoteDirectory.class), remoteMetadataDirectory, mock(RemoteStoreLockManager.class), - mock(ThreadPool.class) + mock(ThreadPool.class), + shardId ); FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index b016e8be67799..8d99d98fbaaf4 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -77,6 +79,7 @@ import static org.mockito.Mockito.when; public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { + private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectoryTests.class); private RemoteDirectory remoteDataDirectory; private RemoteDirectory remoteMetadataDirectory; private RemoteStoreMetadataLockManager mdLockManager; @@ -97,13 +100,6 @@ public void setup() throws IOException { remoteMetadataDirectory = 
mock(RemoteDirectory.class); mdLockManager = mock(RemoteStoreMetadataLockManager.class); threadPool = mock(ThreadPool.class); - - remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( - remoteDataDirectory, - remoteMetadataDirectory, - mdLockManager, - threadPool - ); testUploadTracker = new TestUploadListener(); Settings indexSettings = Settings.builder() @@ -113,6 +109,13 @@ public void setup() throws IOException { ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } @@ -636,7 +639,8 @@ public void testCopyFilesFromMultipartIOException() throws Exception { remoteDataDirectory, remoteMetadataDirectory, mdLockManager, - threadPool + threadPool, + indexShard.shardId() ); populateMetadata(); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 7a54c32248218..4505570b8ebe0 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -784,7 +784,7 @@ protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException {
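// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this change: a recurring theme of this patch
// is replacing static loggers with instance loggers built via
// Loggers.getLogger(getClass(), shardId), as in RemoteSegmentStoreDirectory and
// InternalTranslogManager above and the test/factory call sites that now pass a
// ShardId through. The effect is that every message carries an "[index][shard]"
// prefix. A self-contained approximation of that prefixing in plain Java;
// OpenSearch actually implements it with a log4j2 prefix logger, so the names
// below are hypothetical.
final class ShardPrefixedLoggerSketch {
    interface SimpleLogger {
        void debug(String message);
    }

    // Wraps a delegate so each message is prefixed with the owning shard.
    static SimpleLogger withShardPrefix(SimpleLogger delegate, String indexName, int shardId) {
        String prefix = "[" + indexName + "][" + shardId + "] ";
        return message -> delegate.debug(prefix + message);
    }

    public static void main(String[] args) {
        SimpleLogger console = System.out::println;
        SimpleLogger shardLogger = withShardPrefix(console, "my-index", 0);
        shardLogger.debug("Downloading translog files from remote");
        // prints: [my-index][0] Downloading translog files from remote
    }
}
// ---------------------------------------------------------------------------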