From 9f25c38d867fc69e6629243a56dfee8cd30c908f Mon Sep 17 00:00:00 2001
From: Sandeep Kumawat <2025sandeepkumawat@gmail.com>
Date: Wed, 22 May 2024 20:01:00 +0530
Subject: [PATCH 01/10] Implement interface changes for s3 plugin to
 read/write blob with obj… (#13113) (#13765)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Implement interface changes for s3 plugin to read/write blob with object metadata

---------

Signed-off-by: Sandeep Kumawat <2025sandeepkumawat@gmail.com>
---
 .../repositories/s3/S3BlobContainer.java      | 60 ++++++++++++++++---
 .../s3/S3RetryingInputStream.java             |  7 +++
 .../s3/async/AsyncTransferManager.java        |  9 +++
 .../repositories/s3/async/UploadRequest.java  | 15 ++++-
 .../s3/S3BlobContainerMockClientTests.java    | 11 +++-
 .../s3/S3BlobContainerRetriesTests.java       | 59 ++++++++++++++++--
 .../s3/S3BlobStoreContainerTests.java         | 32 +++++++---
 .../s3/S3RetryingInputStreamTests.java        | 11 ++++
 .../s3/async/AsyncTransferManagerTests.java   | 25 ++++++--
 .../common/blobstore/BlobContainer.java       | 13 ++--
 ...loadResponse.java => FetchBlobResult.java} |  7 ++-
 .../blobstore/stream/write/WriteContext.java  |  2 +-
 .../transfer/BlobStoreTransferService.java    |  4 +-
 .../translog/transfer/TransferService.java    |  6 +-
 14 files changed, 220 insertions(+), 41 deletions(-)
 rename server/src/main/java/org/opensearch/common/blobstore/{BlobDownloadResponse.java => FetchBlobResult.java} (82%)

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index c6706cf1b3f61..acf0c5e83a17b 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -62,6 +62,7 @@
 import software.amazon.awssdk.services.s3.model.UploadPartRequest;
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;
+import software.amazon.awssdk.utils.CollectionUtils;

 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -77,6 +78,7 @@
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStoreException;
 import org.opensearch.common.blobstore.DeleteResult;
+import org.opensearch.common.blobstore.FetchBlobResult;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
@@ -139,6 +141,13 @@ public boolean blobExists(String blobName) {
         }
     }

+    @ExperimentalApi
+    @Override
+    public FetchBlobResult readBlobWithMetadata(String blobName) throws IOException {
+        S3RetryingInputStream s3RetryingInputStream = new S3RetryingInputStream(blobStore, buildKey(blobName));
+        return new FetchBlobResult(s3RetryingInputStream, s3RetryingInputStream.getMetadata());
+    }
+
     @Override
     public InputStream readBlob(String blobName) throws IOException {
         return new S3RetryingInputStream(blobStore, buildKey(blobName));
@@ -170,12 +179,27 @@ public long readBlobPreferredLength() {
      */
     @Override
     public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
+        writeBlobWithMetadata(blobName, inputStream, blobSize, failIfAlreadyExists, null);
+    }
+
+    /**
+     * Write blob with its object metadata.
+     */
+    @ExperimentalApi
+    @Override
+    public void writeBlobWithMetadata(
+        String blobName,
+        InputStream inputStream,
+        long blobSize,
+        boolean failIfAlreadyExists,
+        @Nullable Map<String, String> metadata
+    ) throws IOException {
         assert inputStream.markSupported() : "No mark support on inputStream breaks the S3 SDK's ability to retry requests";
         SocketAccess.doPrivilegedIOException(() -> {
             if (blobSize <= getLargeBlobThresholdInBytes()) {
-                executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize);
+                executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize, metadata);
             } else {
-                executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize);
+                executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize, metadata);
             }
             return null;
         });
@@ -191,7 +215,8 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp
             writeContext.getUploadFinalizer(),
             writeContext.doRemoteDataIntegrityCheck(),
             writeContext.getExpectedChecksum(),
-            blobStore.isUploadRetryEnabled()
+            blobStore.isUploadRetryEnabled(),
+            writeContext.getMetadata()
         );
         try {
             // If file size is greater than the queue capacity than SizeBasedBlockingQ will always reject the upload.
@@ -211,7 +236,8 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp
                         blobStore,
                         uploadRequest.getKey(),
                         inputStream.getInputStream(),
-                        uploadRequest.getContentLength()
+                        uploadRequest.getContentLength(),
+                        uploadRequest.getMetadata()
                     );
                     completionListener.onResponse(null);
                 } catch (Exception ex) {
@@ -582,8 +608,13 @@ private String buildKey(String blobName) {
     /**
      * Uploads a blob using a single upload request
      */
-    void executeSingleUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize)
-        throws IOException {
+    void executeSingleUpload(
+        final S3BlobStore blobStore,
+        final String blobName,
+        final InputStream input,
+        final long blobSize,
+        final Map<String, String> metadata
+    ) throws IOException {

         // Extra safety checks
         if (blobSize > MAX_FILE_SIZE.getBytes()) {
@@ -600,6 +631,10 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin
             .storageClass(blobStore.getStorageClass())
             .acl(blobStore.getCannedACL())
             .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().putObjectMetricPublisher));
+
+        if (CollectionUtils.isNotEmpty(metadata)) {
+            putObjectRequestBuilder = putObjectRequestBuilder.metadata(metadata);
+        }
         if (blobStore.serverSideEncryption()) {
             putObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
         }
@@ -623,8 +658,13 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin
     /**
      * Uploads a blob using multipart upload requests.
      */
-    void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize)
-        throws IOException {
+    void executeMultipartUpload(
+        final S3BlobStore blobStore,
+        final String blobName,
+        final InputStream input,
+        final long blobSize,
+        final Map<String, String> metadata
+    ) throws IOException {

         ensureMultiPartUploadSize(blobSize);
         final long partSize = blobStore.bufferSizeInBytes();
@@ -649,6 +689,10 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName,
             .acl(blobStore.getCannedACL())
             .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().multipartUploadMetricCollector));

+        if (CollectionUtils.isNotEmpty(metadata)) {
+            createMultipartUploadRequestBuilder.metadata(metadata);
+        }
+
         if (blobStore.serverSideEncryption()) {
             createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
         }

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
index d7e47e0ab1bcc..2eb63178b19e0 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java
@@ -48,6 +48,7 @@
 import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;

 /**
@@ -77,6 +78,7 @@ class S3RetryingInputStream extends InputStream {
     private long currentOffset;
     private boolean closed;
     private boolean eof;
+    private Map<String, String> metadata;

     S3RetryingInputStream(S3BlobStore blobStore, String blobKey) throws IOException {
         this(blobStore, blobKey, 0, Long.MAX_VALUE - 1);
@@ -122,6 +124,7 @@ private void openStream() throws IOException {
                 getObjectResponseInputStream.response().contentLength()
             );
             this.currentStream = getObjectResponseInputStream;
+            this.metadata = getObjectResponseInputStream.response().metadata();
             this.isStreamAborted.set(false);
         } catch (final SdkException e) {
             if (e instanceof S3Exception) {
@@ -265,4 +268,8 @@ boolean isEof() {
     boolean isAborted() {
         return isStreamAborted.get();
     }
+
+    Map<String, String> getMetadata() {
+        return this.metadata;
+    }
 }

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java
index 45705431fcfc9..0f9bf3be77d73 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java
@@ -23,6 +23,7 @@
 import software.amazon.awssdk.services.s3.model.PutObjectRequest;
 import software.amazon.awssdk.services.s3.model.PutObjectResponse;
 import software.amazon.awssdk.services.s3.model.S3Exception;
+import software.amazon.awssdk.utils.CollectionUtils;
 import software.amazon.awssdk.utils.CompletableFutureUtils;

 import org.apache.logging.log4j.LogManager;
@@ -154,6 +155,10 @@ private void uploadInParts(
             .bucket(uploadRequest.getBucket())
             .key(uploadRequest.getKey())
             .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector));
+
+        if (CollectionUtils.isNotEmpty(uploadRequest.getMetadata())) {
+            createMultipartUploadRequestBuilder.metadata(uploadRequest.getMetadata());
+        }
         if (uploadRequest.doRemoteDataIntegrityCheck()) {
             createMultipartUploadRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32);
         }
@@ -352,6 +357,10 @@ private void uploadInOneChunk(
             .key(uploadRequest.getKey())
             .contentLength(uploadRequest.getContentLength())
             .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.putObjectMetricPublisher));
+
+        if (CollectionUtils.isNotEmpty(uploadRequest.getMetadata())) {
+            putObjectRequestBuilder.metadata(uploadRequest.getMetadata());
+        }
         if (uploadRequest.doRemoteDataIntegrityCheck()) {
             putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32);
             putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum()));

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java
index 8ea90880b91c5..9873bfeafe005 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java
@@ -9,9 +9,11 @@
 package org.opensearch.repositories.s3.async;

 import org.opensearch.common.CheckedConsumer;
+import org.opensearch.common.Nullable;
 import org.opensearch.common.blobstore.stream.write.WritePriority;

 import java.io.IOException;
+import java.util.Map;

 /**
  * A model encapsulating all details for an upload to S3
@@ -25,6 +27,7 @@ public class UploadRequest {
     private final boolean doRemoteDataIntegrityCheck;
     private final Long expectedChecksum;
     private final boolean uploadRetryEnabled;
+    private final Map<String, String> metadata;

     /**
      * Construct a new UploadRequest object
@@ -36,6 +39,7 @@ public class UploadRequest {
      * @param uploadFinalizer An upload finalizer to call once all parts are uploaded
      * @param doRemoteDataIntegrityCheck A boolean to inform vendor plugins whether remote data integrity checks need to be done
      * @param expectedChecksum Checksum of the file being uploaded for remote data integrity check
+     * @param metadata Metadata of the file being uploaded
      */
     public UploadRequest(
         String bucket,
@@ -45,7 +49,8 @@ public UploadRequest(
         CheckedConsumer<Boolean, IOException> uploadFinalizer,
         boolean doRemoteDataIntegrityCheck,
         Long expectedChecksum,
-        boolean uploadRetryEnabled
+        boolean uploadRetryEnabled,
+        @Nullable Map<String, String> metadata
     ) {
         this.bucket = bucket;
         this.key = key;
@@ -55,6 +60,7 @@ public UploadRequest(
         this.doRemoteDataIntegrityCheck = doRemoteDataIntegrityCheck;
         this.expectedChecksum = expectedChecksum;
         this.uploadRetryEnabled = uploadRetryEnabled;
+        this.metadata = metadata;
     }

     public String getBucket() {
@@ -88,4 +94,11 @@ public Long getExpectedChecksum() {
     public boolean isUploadRetryEnabled() {
         return uploadRetryEnabled;
     }
+
+    /**
+     * @return metadata of the blob to be uploaded
+     */
+    public Map<String, String> getMetadata() {
+        return metadata;
+    }
 }

diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
index 813dccecd24f8..f537171c4b5f1 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
@@ -60,6 +60,7 @@
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
 import
java.util.Locale; import java.util.concurrent.CompletableFuture; @@ -79,6 +80,7 @@ import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; @@ -722,6 +724,7 @@ private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException, W .writePriority(writePriority) .uploadFinalizer(Assert::assertTrue) .doRemoteDataIntegrityCheck(false) + .metadata(new HashMap<>()) .build(); s3BlobContainer.asyncBlobUpload(writeContext, completionListener); @@ -731,7 +734,13 @@ private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException, W } else { assertNull(exceptionRef.get()); } - verify(s3BlobContainer, times(1)).executeMultipartUpload(any(S3BlobStore.class), anyString(), any(InputStream.class), anyLong()); + verify(s3BlobContainer, times(1)).executeMultipartUpload( + any(S3BlobStore.class), + anyString(), + any(InputStream.class), + anyLong(), + anyMap() + ); if (expectException) { verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index 769e4fd1193c0..96ef28d24c14f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -81,8 +81,10 @@ import java.net.SocketTimeoutException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -283,7 +285,7 @@ public InputStream readBlob(String blobName, long position, long length) throws }; } - public void testWriteBlobWithRetries() throws Exception { + public void writeBlobWithRetriesHelper(Map metadata) throws Exception { final int maxRetries = randomInt(5); final CountDown countDown = new CountDown(maxRetries + 1); @@ -323,11 +325,26 @@ public void testWriteBlobWithRetries() throws Exception { final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); try (InputStream stream = new ByteArrayInputStream(bytes)) { - blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false); + if (metadata != null) { + blobContainer.writeBlobWithMetadata("write_blob_max_retries", stream, bytes.length, false, metadata); + } else { + blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false); + } } assertThat(countDown.isCountedDown(), is(true)); } + public void testWriteBlobWithMetadataWithRetries() throws Exception { + Map metadata = new HashMap<>(); + metadata.put("key1", "value1"); + metadata.put("key2", "value2"); + writeBlobWithRetriesHelper(metadata); + } + + public void testWriteBlobWithRetries() throws Exception { + writeBlobWithRetriesHelper(null); + } + public void testWriteBlobByStreamsWithRetries() throws Exception { final int maxRetries = randomInt(5); final CountDown countDown = new CountDown(maxRetries + 1); @@ -411,7 +428,7 @@ private int calculateNumberOfParts(long contentLength, long 
partSize) { return (int) ((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1); } - public void testWriteBlobWithReadTimeouts() { + public void writeBlobWithReadTimeoutsHelper(Map metadata) { final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128)); final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500)); final BlobContainer blobContainer = createBlobContainer(1, readTimeout, true, null); @@ -429,7 +446,11 @@ public void testWriteBlobWithReadTimeouts() { Exception exception = expectThrows(IOException.class, () -> { try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { - blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false); + if (metadata != null) { + blobContainer.writeBlobWithMetadata("write_blob_timeout", stream, bytes.length, false, metadata); + } else { + blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false); + } } }); assertThat( @@ -444,7 +465,18 @@ public void testWriteBlobWithReadTimeouts() { assertThat(exception.getCause().getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); } - public void testWriteLargeBlob() throws Exception { + public void testWriteBlobWithMetadataWithReadTimeouts() throws Exception { + Map metadata = new HashMap<>(); + metadata.put("key1", "value1"); + metadata.put("key2", "value2"); + writeBlobWithReadTimeoutsHelper(metadata); + } + + public void testWriteBlobWithReadTimeouts() throws Exception { + writeBlobWithReadTimeoutsHelper(null); + } + + public void WriteLargeBlobHelper(Map metadata) throws Exception { final boolean useTimeout = rarely(); final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null; final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB); @@ -530,13 +562,28 @@ public void testWriteLargeBlob() throws Exception { } }); - blobContainer.writeBlob("write_large_blob", new ZeroInputStream(blobSize), blobSize, false); + if (metadata != null) { + blobContainer.writeBlobWithMetadata("write_large_blob", new ZeroInputStream(blobSize), blobSize, false, metadata); + } else { + blobContainer.writeBlob("write_large_blob", new ZeroInputStream(blobSize), blobSize, false); + } assertThat(countDownInitiate.isCountedDown(), is(true)); assertThat(countDownUploads.get(), equalTo(0)); assertThat(countDownComplete.isCountedDown(), is(true)); } + public void testWriteLargeBlobWithMetadata() throws Exception { + Map metadata = new HashMap<>(); + metadata.put("key1", "value1"); + metadata.put("key2", "value2"); + WriteLargeBlobHelper(metadata); + } + + public void testWriteLargeBlob() throws Exception { + WriteLargeBlobHelper(null); + } + /** * Asserts that an InputStream is fully consumed, or aborted, when it is closed */ diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 2b45e9cfe2d4b..654d8a72690c4 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -90,6 +90,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -125,7 +126,7 @@ 
public void testExecuteSingleUploadBlobSizeTooLarge() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + () -> blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize, null) ); assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage()); } @@ -139,7 +140,13 @@ public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeSingleUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), ByteSizeUnit.MB.toBytes(2)) + () -> blobContainer.executeSingleUpload( + blobStore, + blobName, + new ByteArrayInputStream(new byte[0]), + ByteSizeUnit.MB.toBytes(2), + null + ) ); assertEquals("Upload request size [2097152] can't be larger than buffer size", e.getMessage()); } @@ -430,6 +437,10 @@ public void testExecuteSingleUpload() throws IOException { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); + final Map metadata = new HashMap<>(); + metadata.put("key1", "value1"); + metadata.put("key2", "value2"); + final BlobPath blobPath = new BlobPath(); if (randomBoolean()) { IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value)); @@ -467,7 +478,7 @@ public void testExecuteSingleUpload() throws IOException { ); final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[blobSize]); - blobContainer.executeSingleUpload(blobStore, blobName, inputStream, blobSize); + blobContainer.executeSingleUpload(blobStore, blobName, inputStream, blobSize, metadata); final PutObjectRequest request = putObjectRequestArgumentCaptor.getValue(); final RequestBody requestBody = requestBodyArgumentCaptor.getValue(); @@ -480,6 +491,7 @@ public void testExecuteSingleUpload() throws IOException { assertEquals(blobSize, request.contentLength().longValue()); assertEquals(storageClass, request.storageClass()); assertEquals(cannedAccessControlList, request.acl()); + assertEquals(metadata, request.metadata()); if (serverSideEncryption) { assertEquals(ServerSideEncryption.AES256, request.serverSideEncryption()); } @@ -492,7 +504,7 @@ public void testExecuteMultipartUploadBlobSizeTooLarge() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize, null) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage()); } @@ -504,7 +516,7 @@ public void testExecuteMultipartUploadBlobSizeTooSmall() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize) + () -> blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize, null) ); assertEquals("Multipart upload request size [" + blobSize + "] can't be smaller than 5mb", e.getMessage()); } @@ -513,6 +525,10 @@ public void testExecuteMultipartUpload() throws IOException { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); + final Map 
metadata = new HashMap<>(); + metadata.put("key1", "value1"); + metadata.put("key2", "value2"); + final BlobPath blobPath = new BlobPath(); if (randomBoolean()) { IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value)); @@ -577,13 +593,15 @@ public void testExecuteMultipartUpload() throws IOException { final ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - blobContainer.executeMultipartUpload(blobStore, blobName, inputStream, blobSize); + blobContainer.executeMultipartUpload(blobStore, blobName, inputStream, blobSize, metadata); final CreateMultipartUploadRequest initRequest = createMultipartUploadRequestArgumentCaptor.getValue(); assertEquals(bucketName, initRequest.bucket()); assertEquals(blobPath.buildAsString() + blobName, initRequest.key()); assertEquals(storageClass, initRequest.storageClass()); assertEquals(cannedAccessControlList, initRequest.acl()); + assertEquals(metadata, initRequest.metadata()); + if (serverSideEncryption) { assertEquals(ServerSideEncryption.AES256, initRequest.serverSideEncryption()); } @@ -686,7 +704,7 @@ public void testExecuteMultipartUploadAborted() { final IOException e = expectThrows(IOException.class, () -> { final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - blobContainer.executeMultipartUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize); + blobContainer.executeMultipartUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), blobSize, null); }); assertEquals("Unable to upload object [" + blobName + "] using multipart upload", e.getMessage()); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java index b38d5119b4108..d884a46f3ecc5 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java @@ -43,6 +43,8 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; @@ -61,6 +63,15 @@ public void testInputStreamFullyConsumed() throws IOException { assertThat(stream.isAborted(), is(false)); } + public void testInputStreamGetMetadata() throws IOException { + final byte[] expectedBytes = randomByteArrayOfLength(randomIntBetween(1, 512)); + + final S3RetryingInputStream stream = createInputStream(expectedBytes, 0L, (long) (Integer.MAX_VALUE - 1)); + + Map metadata = new HashMap<>(); + assertEquals(stream.getMetadata(), metadata); + } + public void testInputStreamIsAborted() throws IOException { final byte[] expectedBytes = randomByteArrayOfLength(randomIntBetween(10, 512)); final byte[] actualBytes = new byte[randomIntBetween(1, Math.max(1, expectedBytes.length - 1))]; diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 55b58895abfd4..89add3cdbfc60 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ 
b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java
@@ -41,7 +41,9 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
@@ -88,11 +90,14 @@ public void testOneChunkUpload() {
         );

         AtomicReference<InputStream> streamRef = new AtomicReference<>();
+        Map<String, String> metadata = new HashMap<>();
+        metadata.put("key1", "value1");
+        metadata.put("key2", "value2");
         CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject(
             s3AsyncClient,
             new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> {
                 // do nothing
-            }, false, null, true),
+            }, false, null, true, metadata),
             new StreamContext((partIdx, partSize, position) -> {
                 streamRef.set(new ZeroInputStream(partSize));
                 return new InputStreamContainer(streamRef.get(), partSize, position);
@@ -133,11 +138,15 @@ public void testOneChunkUploadCorruption() {
         deleteObjectResponseCompletableFuture.complete(DeleteObjectResponse.builder().build());
         when(s3AsyncClient.deleteObject(any(DeleteObjectRequest.class))).thenReturn(deleteObjectResponseCompletableFuture);

+        Map<String, String> metadata = new HashMap<>();
+        metadata.put("key1", "value1");
+        metadata.put("key2", "value2");
+
         CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject(
             s3AsyncClient,
             new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> {
                 // do nothing
-            }, false, null, true),
+            }, false, null, true, metadata),
             new StreamContext(
                 (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position),
                 ByteSizeUnit.MB.toBytes(1),
@@ -185,12 +194,16 @@ public void testMultipartUpload() {
             abortMultipartUploadResponseCompletableFuture
         );

+        Map<String, String> metadata = new HashMap<>();
+        metadata.put("key1", "value1");
+        metadata.put("key2", "value2");
+
         List<InputStream> streams = new ArrayList<>();
         CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject(
             s3AsyncClient,
             new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> {
                 // do nothing
-            }, true, 3376132981L, true),
+            }, true, 3376132981L, true, metadata),
             new StreamContext((partIdx, partSize, position) -> {
                 InputStream stream = new ZeroInputStream(partSize);
                 streams.add(stream);
@@ -246,11 +259,15 @@ public void testMultipartUploadCorruption() {
             abortMultipartUploadResponseCompletableFuture
         );

+        Map<String, String> metadata = new HashMap<>();
+        metadata.put("key1", "value1");
+        metadata.put("key2", "value2");
+
         CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject(
             s3AsyncClient,
             new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> {
                 // do nothing
-            }, true, 0L, true),
+            }, true, 0L, true, metadata),
             new StreamContext(
                 (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position),
                 ByteSizeUnit.MB.toBytes(1),

diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java
index e16e75de7f27d..4f5f8d4b1ef5f 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java
@@ -32,6 +32,7 @@

 package org.opensearch.common.blobstore;

+import org.opensearch.common.Nullable;
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.core.action.ActionListener;

@@ -79,16 +80,16 @@ public interface BlobContainer {
     InputStream readBlob(String blobName) throws IOException;

     /**
-     * Creates a new {@link BlobDownloadResponse} for the given blob name.
+     * Creates a new {@link FetchBlobResult} for the given blob name.
      *
      * @param blobName
      *          The name of the blob to get an {@link InputStream} for.
-     * @return The {@link BlobDownloadResponse} of the blob.
+     * @return The {@link FetchBlobResult} of the blob.
      * @throws NoSuchFileException if the blob does not exist
      * @throws IOException if the blob can not be read.
      */
     @ExperimentalApi
-    default BlobDownloadResponse readBlobWithMetadata(String blobName) throws IOException {
+    default FetchBlobResult readBlobWithMetadata(String blobName) throws IOException {
         throw new UnsupportedOperationException("readBlobWithMetadata is not implemented yet");
     };

@@ -166,9 +167,9 @@ default long readBlobPreferredLength() {
     default void writeBlobWithMetadata(
         String blobName,
         InputStream inputStream,
-        Map<String, String> metadata,
         long blobSize,
-        boolean failIfAlreadyExists
+        boolean failIfAlreadyExists,
+        @Nullable Map<String, String> metadata
     ) throws IOException {
         throw new UnsupportedOperationException("writeBlobWithMetadata is not implemented yet");
     };

@@ -219,7 +220,7 @@ default void writeBlobWithMetadata(
     default void writeBlobAtomicWithMetadata(
         String blobName,
         InputStream inputStream,
-        Map<String, String> metadata,
+        @Nullable Map<String, String> metadata,
         long blobSize,
         boolean failIfAlreadyExists
     ) throws IOException {

diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobDownloadResponse.java b/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java
similarity index 82%
rename from server/src/main/java/org/opensearch/common/blobstore/BlobDownloadResponse.java
rename to server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java
index 97f3e4a16a76c..55aca771b586c 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobDownloadResponse.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java
@@ -8,6 +8,8 @@

 package org.opensearch.common.blobstore;

+import org.opensearch.common.annotation.ExperimentalApi;
+
 import java.io.InputStream;
 import java.util.Map;

@@ -17,7 +19,8 @@
  *
  * @opensearch.experimental
  */
-public class BlobDownloadResponse {
+@ExperimentalApi
+public class FetchBlobResult {

     /**
      * Downloaded blob InputStream
@@ -37,7 +40,7 @@ public Map<String, String> getMetadata() {
         return metadata;
     }

-    public BlobDownloadResponse(InputStream inputStream, Map<String, String> metadata) {
+    public FetchBlobResult(InputStream inputStream, Map<String, String> metadata) {
         this.inputStream = inputStream;
         this.metadata = metadata;
     }

diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java
index d14800e82e495..9a7fc24f7e484 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java
@@ -52,7 +52,7 @@ private WriteContext(
         CheckedConsumer<Boolean, IOException> uploadFinalizer,
         boolean doRemoteDataIntegrityCheck,
         @Nullable Long expectedChecksum,
-        Map<String, String> metadata
+        @Nullable Map<String, String> metadata
     ) {
         this.fileName = fileName;
         this.streamContextSupplier = streamContextSupplier;

diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
index a1d5041ff9aff..bec2d78d9af62 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
@@ -15,10 +15,10 @@
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer;
 import org.opensearch.common.blobstore.BlobContainer;
-import org.opensearch.common.blobstore.BlobDownloadResponse;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStore;
+import org.opensearch.common.blobstore.FetchBlobResult;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
 import org.opensearch.common.blobstore.transfer.RemoteTransferContainer;
@@ -168,7 +168,7 @@ public InputStream downloadBlob(Iterable<String> path, String fileName) throws I

     @Override
     @ExperimentalApi
-    public BlobDownloadResponse downloadBlobWithMetadata(Iterable<String> path, String fileName) throws IOException {
+    public FetchBlobResult downloadBlobWithMetadata(Iterable<String> path, String fileName) throws IOException {
         return blobStore.blobContainer((BlobPath) path).readBlobWithMetadata(fileName);
     }

diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
index 5fe8b02058383..0894ebf500ebd 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
@@ -9,9 +9,9 @@
 package org.opensearch.index.translog.transfer;

 import org.opensearch.common.annotation.ExperimentalApi;
-import org.opensearch.common.blobstore.BlobDownloadResponse;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.FetchBlobResult;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
@@ -131,11 +131,11 @@ void uploadBlobs(
      *
      * @param path     the remote path from where download should be made
      * @param fileName the name of the file
-     * @return {@link BlobDownloadResponse} of the remote file
+     * @return {@link FetchBlobResult} of the remote file
      * @throws IOException the exception while reading the data
      */
     @ExperimentalApi
-    BlobDownloadResponse downloadBlobWithMetadata(Iterable<String> path, String fileName) throws IOException;
+    FetchBlobResult downloadBlobWithMetadata(Iterable<String> path, String fileName) throws IOException;

     void listAllInSortedOrder(Iterable<String> path, String filenamePrefix, int limit, ActionListener<List<BlobMetadata>> listener);


From 03c13cbec0f553d47e24739806f010095481fea5 Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Wed, 22 May 2024 22:02:06 -0700
Subject: [PATCH 02/10] Gradle Check Optimization (#13786) (#13787)

(cherry picked from commit 5441d55ee191ca789ba86b7e477a64eb73398c80)

Signed-off-by: Prudhvi Godithi
Signed-off-by: github-actions[bot]
Co-authored-by: github-actions[bot]
---
 .github/workflows/gradle-check.yml | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff
--git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 6d8fe02c7c9c3..f610ff5c31049 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -39,6 +39,22 @@ jobs: echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_or_commit_description=$(jq --ascii-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "post_merge_action=false" >> $GITHUB_ENV + + # to get the PR data that can be used for post merge actions + - uses: actions/github-script@v7 + if: github.event_name == 'push' + id: get_pr_data + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + return ( + await github.rest.repos.listPullRequestsAssociatedWithCommit({ + commit_sha: context.sha, + owner: context.repo.owner, + repo: context.repo.repo, + }) + ).data[0]; - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -53,8 +69,9 @@ jobs: echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_number=Null" >> $GITHUB_ENV + echo 'pr_number=${{ fromJson(steps.get_pr_data.outputs.result).number }}' >> $GITHUB_ENV echo "pr_or_commit_description=$(jq --ascii-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "post_merge_action=true" >> $GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 From edcbdafb20e2c0afae36ca45f4ad66ad8ed12ed4 Mon Sep 17 00:00:00 2001 From: Sandeep Kumawat <2025sandeepkumawat@gmail.com> Date: Thu, 23 May 2024 15:24:28 +0530 Subject: [PATCH 03/10] [Remote Store] Upload translog checkpoint as object metadata to translog (#13637) (#13795) * Upload translog checkpoint as object metadata to translog Signed-off-by: Sandeep Kumawat <2025sandeepkumawat@gmail.com> --- CHANGELOG.md | 1 + .../repositories/s3/S3BlobContainer.java | 6 +- .../repositories/s3/S3BlobStore.java | 5 + .../indices/create/RemoteCloneIndexIT.java | 3 + .../indices/create/RemoteShrinkIndexIT.java | 9 + .../indices/create/RemoteSplitIndexIT.java | 2 + .../remote/RemoteClusterStateServiceIT.java | 6 + .../RemoteMigrationIndexMetadataUpdateIT.java | 1 + .../remotestore/BaseRemoteStoreRestoreIT.java | 7 +- .../remotestore/PrimaryTermValidationIT.java | 3 +- .../RemoteStoreBaseIntegTestCase.java | 47 +++- .../RemoteStoreClusterStateRestoreIT.java | 6 + .../remotestore/RemoteStoreForceMergeIT.java | 5 +- .../opensearch/remotestore/RemoteStoreIT.java | 26 +- .../RemoteStoreRepositoryRegistrationIT.java | 4 +- .../remotestore/RemoteStoreStatsIT.java | 2 +- .../RemoteStoreUploadIndexPathIT.java | 1 + .../MockFsMetadataSupportedBlobContainer.java | 92 +++++++ .../MockFsMetadataSupportedBlobStore.java | 44 +++ .../MockFsMetadataSupportedRepository.java | 51 ++++ ...ckFsMetadataSupportedRepositoryPlugin.java | 38 +++ .../cluster/metadata/IndexMetadata.java | 1 + .../metadata/MetadataCreateIndexService.java | 33 ++- .../allocation/IndexMetadataUpdater.java | 2 +- .../common/blobstore/BlobContainer.java | 6 +- .../common/blobstore/BlobStore.java | 7 + ...sult.java => InputStreamWithMetadata.java} | 12 +- .../common/settings/ClusterSettings.java | 3 +- .../org/opensearch/index/IndexSettings.java | 7 + .../RemoteMigrationIndexMetadataUpdater.java | 22 +- .../RemoteStoreCustomMetadataResolver.java | 74 
++++++ .../RemoteStorePathStrategyResolver.java | 44 --- .../index/remote/RemoteStoreUtils.java | 32 ++- .../opensearch/index/shard/IndexShard.java | 12 +- .../index/translog/RemoteFsTranslog.java | 31 ++- .../transfer/BlobStoreTransferService.java | 41 ++- .../index/translog/transfer/FileSnapshot.java | 10 + .../translog/transfer/TransferService.java | 6 +- .../translog/transfer/TransferSnapshot.java | 7 + .../TranslogCheckpointTransferSnapshot.java | 10 + .../transfer/TranslogTransferManager.java | 104 ++++++-- .../indices/RemoteStoreSettings.java | 24 ++ .../main/java/org/opensearch/node/Node.java | 3 +- .../opensearch/snapshots/RestoreService.java | 2 +- .../MetadataRolloverServiceTests.java | 9 +- .../MetadataCreateIndexServiceTests.java | 62 +++-- .../MetadataIndexTemplateServiceTests.java | 10 +- ...oteMigrationIndexMetadataUpdaterTests.java | 13 +- ...emoteStoreCustomMetadataResolverTests.java | 223 ++++++++++++++++ .../RemoteStorePathStrategyResolverTests.java | 148 ----------- .../index/remote/RemoteStoreUtilsTests.java | 57 ++++ .../BlobStoreTransferServiceTests.java | 40 +++ .../TranslogTransferManagerTests.java | 251 +++++++++++++----- .../indices/cluster/ClusterStateChanges.java | 3 +- .../snapshots/SnapshotResiliencyTests.java | 3 +- .../test/OpenSearchIntegTestCase.java | 1 + 56 files changed, 1287 insertions(+), 385 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java rename server/src/main/java/org/opensearch/common/blobstore/{FetchBlobResult.java => InputStreamWithMetadata.java} (74%) create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java delete mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java create mode 100644 server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java delete mode 100644 server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index cbbfc18a7c9b2..d52efd755b892 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423)) - Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478)) - Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293)) +- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) ### Dependencies - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) diff --git 
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
index acf0c5e83a17b..b489a3cc85037 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java
@@ -78,7 +78,7 @@
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStoreException;
 import org.opensearch.common.blobstore.DeleteResult;
-import org.opensearch.common.blobstore.FetchBlobResult;
+import org.opensearch.common.blobstore.InputStreamWithMetadata;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
@@ -143,9 +143,9 @@ public boolean blobExists(String blobName) {

     @ExperimentalApi
     @Override
-    public FetchBlobResult readBlobWithMetadata(String blobName) throws IOException {
+    public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException {
         S3RetryingInputStream s3RetryingInputStream = new S3RetryingInputStream(blobStore, buildKey(blobName));
-        return new FetchBlobResult(s3RetryingInputStream, s3RetryingInputStream.getMetadata());
+        return new InputStreamWithMetadata(s3RetryingInputStream, s3RetryingInputStream.getMetadata());
     }

     @Override

diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
index de815f9202f44..f688be9216b8f 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
@@ -244,6 +244,11 @@ public Map<Metric, Map<String, Long>> extendedStats() {
         return extendedStats;
     }

+    @Override
+    public boolean isBlobMetadataEnabled() {
+        return true;
+    }
+
     public ObjectCannedACL getCannedACL() {
         return cannedACL;
     }

diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
index 4be049c9a9109..a1122f279c7e4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
@@ -52,6 +52,7 @@
 import org.opensearch.index.query.TermsQueryBuilder;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.VersionUtils;

 import java.util.concurrent.ExecutionException;
@@ -60,6 +61,7 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.equalTo;

+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase {

     @Override
@@ -139,6 +141,7 @@ public void testCreateCloneIndex() {
     }

     public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException {
+        asyncUploadMockFsRepo = false;
         Version version = VersionUtils.randomIndexCompatibleVersion(random());
         int numPrimaryShards = 1;
         prepareCreate("source").setSettings(
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java index 282eb9c6ad95e..cd19a0ee1ff77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -48,7 +48,9 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; +import org.junit.Before; import java.util.Arrays; import java.util.Map; @@ -61,12 +63,18 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase { @Override protected boolean forbidPrivateIndexSettings() { return false; } + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) @@ -84,6 +92,7 @@ public void testCreateShrinkIndexToN() { int[] shardSplits = randomFrom(possibleShardSplits); assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); for (int i = 0; i < 20; i++) { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java index dd4252d24f314..dc3c8793a93f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -67,6 +67,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -86,6 +87,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index dfde1b958882c..07377caaba0bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -18,6 +18,7 @@ import org.opensearch.repositories.RepositoriesService; import 
org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.junit.Before;

 import java.nio.charset.StandardCharsets;
 import java.util.Base64;
@@ -32,6 +33,11 @@ public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase {

     private static String INDEX_NAME = "test-index";

+    @Before
+    public void setup() {
+        asyncUploadMockFsRepo = false;
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build();

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
index 45679598dc551..53f4ef3fe281f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
@@ -512,5 +512,6 @@ private void assertCustomIndexMetadata(String index) {
         logger.info("---> Asserting custom index metadata");
         IndexMetadata iMd = internalCluster().client().admin().cluster().prepareState().get().getState().metadata().index(index);
         assertNotNull(iMd.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY));
+        assertNotNull(iMd.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY).get(IndexMetadata.TRANSLOG_METADATA_KEY));
     }
 }

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java
index d29dacb001434..280fd13f0fdcf 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java
@@ -11,15 +11,18 @@
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.plugins.Plugin;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.transport.MockTransportService;

-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;

+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase {
     static final String INDEX_NAME = "remote-store-test-idx-1";
     static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2";
@@ -39,7 +42,7 @@ public Settings indexSettings(int shards, int replicas) {

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(MockTransportService.TestPlugin.class);
+        return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList());
     }

     protected void restore(String... indices) {

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java
index e14a4062f7775..6b94e638a6876 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java
@@ -30,7 +30,6 @@
 import org.junit.Before;

 import java.nio.file.Path;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
@@ -50,7 +49,7 @@ public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase {

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(MockTransportService.TestPlugin.class);
+        return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList());
     }

     @Before

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
index d7ad0daa43524..740aee69f7d80 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
@@ -36,6 +36,9 @@
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin;
+import org.opensearch.remotestore.translogmetadata.mocks.MockFsMetadataSupportedRepositoryPlugin;
 import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.repositories.fs.ReloadableFsRepository;
@@ -48,6 +51,7 @@
 import java.nio.file.Path;
 import java.nio.file.SimpleFileVisitor;
 import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
@@ -55,6 +59,7 @@
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
@@ -74,6 +79,8 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase {
     protected Path segmentRepoPath;
     protected Path translogRepoPath;
     protected boolean clusterSettingsSuppliedByTest = false;
+    protected boolean asyncUploadMockFsRepo = randomBoolean();
+    private boolean metadataSupportedType = randomBoolean();
     private final List<String> documentKeys = List.of(
         randomAlphaOfLength(5),
         randomAlphaOfLength(5),
@@ -129,6 +136,19 @@ protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlus
         return indexingStats;
     }

+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        if (!clusterSettingsSuppliedByTest && asyncUploadMockFsRepo) {
+            if (metadataSupportedType) {
+                return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsMetadataSupportedRepositoryPlugin.class))
+                    .collect(Collectors.toList());
+            } else {
+                return
Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); + } + } + return super.nodePlugins(); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { if (segmentRepoPath == null || translogRepoPath == null) { @@ -138,10 +158,27 @@ protected Settings nodeSettings(int nodeOrdinal) { if (clusterSettingsSuppliedByTest) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); } else { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) - .build(); + if (asyncUploadMockFsRepo) { + String repoType = metadataSupportedType ? MockFsMetadataSupportedRepositoryPlugin.TYPE_MD : MockFsRepositoryPlugin.TYPE; + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + repoType, + REPOSITORY_2_NAME, + translogRepoPath, + repoType + ) + ) + .build(); + } else { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .build(); + } } } @@ -221,6 +258,8 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFiel @After public void teardown() { clusterSettingsSuppliedByTest = false; + asyncUploadMockFsRepo = randomBoolean(); + metadataSupportedType = randomBoolean(); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 3f90732f1f13d..8f27c25c56667 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -24,6 +24,7 @@ import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.file.Files; @@ -47,6 +48,11 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { + @Before + public void setup() { + asyncUploadMockFsRepo = false; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java index 0bcde4b44c734..d957dda1ba04f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java @@ -19,11 +19,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; 
+import java.util.stream.Stream; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -37,7 +38,7 @@ public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index ca0ae3ca9a700..7721b18a4fe6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -38,7 +38,6 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; -import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -48,7 +47,6 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -56,6 +54,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -81,7 +80,7 @@ public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, MockFsRepositoryPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } @Override @@ -797,25 +796,8 @@ public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionExcep // Test local only translog files which are not uploaded to remote store (no metadata present in remote) // Without the cleanup change in RemoteFsTranslog.createEmptyTranslog, this test fails with NPE. public void testLocalOnlyTranslogCleanupOnNodeRestart() throws Exception { - clusterSettingsSuppliedByTest = true; - - // Overriding settings to use AsyncMultiStreamBlobContainer - Settings settings = Settings.builder() - .put(super.nodeSettings(1)) - .put( - remoteStoreClusterSettings( - REPOSITORY_NAME, - segmentRepoPath, - MockFsRepositoryPlugin.TYPE, - REPOSITORY_2_NAME, - translogRepoPath, - MockFsRepositoryPlugin.TYPE - ) - ) - .build(); - - internalCluster().startClusterManagerOnlyNode(settings); - String dataNode = internalCluster().startDataOnlyNode(settings); + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNode(); // 1. 
Create index with 0 replica createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index ef2dcf3217df6..b0827dcfe4892 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -25,11 +25,11 @@ import org.opensearch.test.transport.MockTransportService; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; @@ -38,7 +38,7 @@ public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTes @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } public void testSingleNodeClusterRepositoryRegistration() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index cb5e2e911705b..7715e19ef349d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -54,7 +54,7 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Stream.concat(super.nodePlugins().stream(), Stream.of(MockTransportService.TestPlugin.class)).collect(Collectors.toList()); } public void setup() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java index 9b30dacfced13..44c02dbb6d611 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreUploadIndexPathIT.java @@ -41,6 +41,7 @@ protected Settings nodeSettings(int nodeOrdinal) { * wherever not required. 
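+     * This test pins {@code asyncUploadMockFsRepo} to {@code false} (see the line added below) so
+     * that the randomized mock-fs repositories from the base test case are not registered here.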
 */
    public void testRemoteIndexPathFileCreation() throws ExecutionException, InterruptedException, IOException {
+        asyncUploadMockFsRepo = false;
        String clusterManagerNode = internalCluster().startClusterManagerOnlyNode();
        internalCluster().startDataOnlyNodes(2);

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java
new file mode 100644
index 0000000000000..109a884ff6c5d
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobContainer.java
@@ -0,0 +1,92 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.remotestore.translogmetadata.mocks;
+
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.InputStreamWithMetadata;
+import org.opensearch.common.blobstore.fs.FsBlobStore;
+import org.opensearch.common.blobstore.stream.write.WriteContext;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.remotestore.multipart.mocks.MockFsAsyncBlobContainer;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Path;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.Map;
+
+public class MockFsMetadataSupportedBlobContainer extends MockFsAsyncBlobContainer {
+
+    private static final String CHECKPOINT_FILE_DATA_KEY = "ckp-data";
+
+    public MockFsMetadataSupportedBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) {
+        super(blobStore, blobPath, path, triggerDataIntegrityFailure);
+    }
+
+    @Override
+    public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException {
+        // If the upload writeContext has non-null metadata, we store the metadata content as the translog.ckp file.
+        if (writeContext.getMetadata() != null) {
+            String base64String = writeContext.getMetadata().get(CHECKPOINT_FILE_DATA_KEY);
+            byte[] decodedBytes = Base64.getDecoder().decode(base64String);
+            ByteArrayInputStream inputStream = new ByteArrayInputStream(decodedBytes);
+            int length = decodedBytes.length;
+            String ckpFileName = getCheckpointFileName(writeContext.getFileName());
+            writeBlob(ckpFileName, inputStream, length, true);
+        }
+        super.asyncBlobUpload(writeContext, completionListener);
+    }
+
+    // This is a utility to get the translog.ckp file name for a given translog.tlog file.
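+    // For example, a hypothetical generation-5 translog file "translog-5.tlog" maps to the
+    // checkpoint file name "translog-5.ckp"; names without the ".tlog" suffix are rejected.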
+    private String getCheckpointFileName(String translogFileName) {
+        if (!translogFileName.endsWith(".tlog")) {
+            throw new IllegalArgumentException("Invalid translog file name format: " + translogFileName);
+        }
+
+        int dotIndex = translogFileName.lastIndexOf('.');
+        String baseName = translogFileName.substring(0, dotIndex);
+        return baseName + ".ckp";
+    }
+
+    public static String convertToBase64(InputStream inputStream) throws IOException {
+        try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
+            byte[] buffer = new byte[128];
+            int bytesRead;
+            int totalBytesRead = 0;
+
+            while ((bytesRead = inputStream.read(buffer)) != -1) {
+                byteArrayOutputStream.write(buffer, 0, bytesRead);
+                totalBytesRead += bytesRead;
+                if (totalBytesRead > 1024) {
+                    // We enforce a limit of 1KB on the size of the checkpoint file.
+                    throw new AssertionError("Input stream exceeds 1KB limit");
+                }
+            }
+
+            byte[] bytes = byteArrayOutputStream.toByteArray();
+            return Base64.getEncoder().encodeToString(bytes);
+        }
+    }
+
+    // During a readBlobWithMetadata call, we separately download the translog.ckp file and return it as metadata.
+    @Override
+    public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException {
+        String ckpFileName = getCheckpointFileName(blobName);
+        InputStream inputStream = readBlob(blobName);
+        try (InputStream ckpInputStream = readBlob(ckpFileName)) {
+            String ckpString = convertToBase64(ckpInputStream);
+            Map<String, String> metadata = new HashMap<>();
+            metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpString);
+            return new InputStreamWithMetadata(inputStream, metadata);
+        }
+    }
+}

diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java
new file mode 100644
index 0000000000000..89dd91c8222ac
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedBlobStore.java
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class MockFsMetadataSupportedBlobStore extends FsBlobStore { + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedBlobStore(int bufferSizeInBytes, Path path, boolean readonly, boolean triggerDataIntegrityFailure) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new MockFsMetadataSupportedBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + + // Make MockFs metadata supported + @Override + public boolean isBlobMetadataEnabled() { + return true; + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java new file mode 100644 index 0000000000000..333fba413ce4e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.fs.FsRepository; + +public class MockFsMetadataSupportedRepository extends FsRepository { + + public static Setting TRIGGER_DATA_INTEGRITY_FAILURE = Setting.boolSetting( + "mock_fs_repository.trigger_data_integrity_failure", + false + ); + + private final boolean triggerDataIntegrityFailure; + + public MockFsMetadataSupportedRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + triggerDataIntegrityFailure = TRIGGER_DATA_INTEGRITY_FAILURE.get(metadata.settings()); + } + + @Override + protected BlobStore createBlobStore() throws Exception { + FsBlobStore fsBlobStore = (FsBlobStore) super.createBlobStore(); + return new MockFsMetadataSupportedBlobStore( + fsBlobStore.bufferSizeInBytes(), + fsBlobStore.path(), + isReadOnly(), + triggerDataIntegrityFailure + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java new file mode 100644 index 0000000000000..71ae652a6b23d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepositoryPlugin.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.translogmetadata.mocks; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.RepositoryPlugin; +import org.opensearch.repositories.Repository; + +import java.util.Collections; +import java.util.Map; + +public class MockFsMetadataSupportedRepositoryPlugin extends Plugin implements RepositoryPlugin { + + public static final String TYPE_MD = "fs_metadata_supported_repository"; + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + "fs_metadata_supported_repository", + metadata -> new MockFsMetadataSupportedRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index b8cb9e684fcec..fdf8c7a74df68 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -636,6 +636,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { static final String KEY_SYSTEM = "system"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store"; + public static final String TRANSLOG_METADATA_KEY = "translog_metadata"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d42d647aad80f..7829d42b803ef 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -90,10 +90,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.remote.RemoteStoreCustomMetadataResolver; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.remote.RemoteStorePathStrategyResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -105,6 +105,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -178,7 +179,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathStrategyResolver remoteStorePathStrategyResolver; + private final RemoteStoreCustomMetadataResolver remoteStoreCustomMetadataResolver; public 
MetadataCreateIndexService(
        final Settings settings,
@@ -194,7 +195,8 @@ public MetadataCreateIndexService(
        final SystemIndices systemIndices,
        final boolean forbidPrivateIndexSettings,
        final AwarenessReplicaBalance awarenessReplicaBalance,
-        final RemoteStoreSettings remoteStoreSettings
+        final RemoteStoreSettings remoteStoreSettings,
+        final Supplier<RepositoriesService> repositoriesServiceSupplier
    ) {
        this.settings = settings;
        this.clusterService = clusterService;
@@ -213,8 +215,8 @@ public MetadataCreateIndexService(
        // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction.
        createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true);
        Supplier<Version> minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion();
-        remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings)
-            ? new RemoteStorePathStrategyResolver(remoteStoreSettings, minNodeVersionSupplier)
+        remoteStoreCustomMetadataResolver = isRemoteDataAttributePresent(settings)
+            ? new RemoteStoreCustomMetadataResolver(remoteStoreSettings, minNodeVersionSupplier, repositoriesServiceSupplier, settings)
            : null;
    }
@@ -563,7 +565,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata(
        tmpImdBuilder.setRoutingNumShards(routingNumShards);
        tmpImdBuilder.settings(indexSettings);
        tmpImdBuilder.system(isSystem);
-        addRemoteStorePathStrategyInCustomData(tmpImdBuilder, true);
+        addRemoteStoreCustomMetadata(tmpImdBuilder, true);

        // Set up everything, now locally create the index to see that things are ok, and apply
        IndexMetadata tempMetadata = tmpImdBuilder.build();
@@ -573,13 +575,13 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata(
    }

    /**
-     * Adds the remote store path type information in custom data of index metadata.
+     * Adds 1) the remote store path type and 2) the checkpoint-as-translog-metadata flag to the custom data of index metadata.
     *
     * @param tmpImdBuilder index metadata builder.
     * @param assertNullOldType flag to verify that the old remote store path type is null
     */
-    public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) {
-        if (remoteStorePathStrategyResolver == null) {
+    public void addRemoteStoreCustomMetadata(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) {
+        if (remoteStoreCustomMetadataResolver == null) {
            return;
        }
        // It is possible that remote custom data exists already. In such cases, we need to only update the path type
        Map<String, String> existingCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
        assert assertNullOldType == false || Objects.isNull(existingCustomData);

-        // Determine the path type for use using the remoteStorePathResolver.
-        RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get();
        Map<String, String> remoteCustomData = new HashMap<>();
+
+        // Determine if the ckp would be stored as translog metadata
+        boolean isTranslogMetadataEnabled = remoteStoreCustomMetadataResolver.isTranslogMetadataEnabled();
+        remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(isTranslogMetadataEnabled));
+
+        // Determine the path type to use via the remoteStoreCustomMetadataResolver.
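+        // Illustratively, the map written below could end up as {"translog_metadata" -> "true",
+        // PathType.NAME -> "HASHED_PREFIX", PathHashAlgorithm.NAME -> "FNV_1A_BASE64"}; the path
+        // values here are hypothetical examples, not taken from this change.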
+ RemoteStorePathStrategy newPathStrategy = remoteStoreCustomMetadataResolver.getPathStrategy(); remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name()); if (Objects.nonNull(newPathStrategy.getHashAlgorithm())) { remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name()); } - logger.trace(() -> new ParameterizedMessage("Added newStrategy={}, replaced oldStrategy={}", remoteCustomData, existingCustomData)); + logger.trace( + () -> new ParameterizedMessage("Added newCustomData={}, replaced oldCustomData={}", remoteCustomData, existingCustomData) + ); tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index ddcccd597e894..2431f57a6a1f9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -176,7 +176,7 @@ public Metadata applyChanges(Metadata oldMetadata, RoutingTable newRoutingTable, oldMetadata.settings(), logger ); - migrationImdUpdater.maybeUpdateRemoteStorePathStrategy(indexMetadataBuilder, index.getName()); + migrationImdUpdater.maybeUpdateRemoteStoreCustomMetadata(indexMetadataBuilder, index.getName()); migrationImdUpdater.maybeAddRemoteIndexSettings(indexMetadataBuilder, index.getName()); } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 4f5f8d4b1ef5f..a2e4199029ef4 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -80,16 +80,16 @@ public interface BlobContainer { InputStream readBlob(String blobName) throws IOException; /** - * Creates a new {@link FetchBlobResult} for the given blob name. + * Creates a new {@link InputStreamWithMetadata} for the given blob name. * * @param blobName * The name of the blob to get an {@link InputStream} for. - * @return The {@link FetchBlobResult} of the blob. + * @return The {@link InputStreamWithMetadata} of the blob. * @throws NoSuchFileException if the blob does not exist * @throws IOException if the blob can not be read. 
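+     * <p>A minimal usage sketch (illustrative: {@code container} is a hypothetical
+     * {@link BlobContainer} instance, and {@code getInputStream()} is assumed to be the usual
+     * accessor on the result); the result is closeable, so try-with-resources applies:
+     * <pre>{@code
+     * try (InputStreamWithMetadata blob = container.readBlobWithMetadata("translog-1.tlog")) {
+     *     Map<String, String> metadata = blob.getMetadata();
+     *     // read from blob.getInputStream() as needed
+     * }
+     * }</pre>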
*/ @ExperimentalApi - default FetchBlobResult readBlobWithMetadata(String blobName) throws IOException { + default InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { throw new UnsupportedOperationException("readBlobWithMetadata is not implemented yet"); }; diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index 8ce8ec8e01abe..406ccc6aa4a18 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -71,6 +71,13 @@ default Map> extendedStats() { */ default void reload(RepositoryMetadata repositoryMetadata) {} + /** + * Returns a boolean indicating if blobStore has object metadata support enabled + */ + default boolean isBlobMetadataEnabled() { + return false; + } + /** * Metrics for BlobStore interactions */ diff --git a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java b/server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java similarity index 74% rename from server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java rename to server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java index 55aca771b586c..aa307e260e033 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/FetchBlobResult.java +++ b/server/src/main/java/org/opensearch/common/blobstore/InputStreamWithMetadata.java @@ -10,6 +10,8 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.io.Closeable; +import java.io.IOException; import java.io.InputStream; import java.util.Map; @@ -20,7 +22,7 @@ * @opensearch.experimental */ @ExperimentalApi -public class FetchBlobResult { +public class InputStreamWithMetadata implements Closeable { /** * Downloaded blob InputStream @@ -40,9 +42,15 @@ public Map getMetadata() { return metadata; } - public FetchBlobResult(InputStream inputStream, Map metadata) { + public InputStreamWithMetadata(InputStream inputStream, Map metadata) { this.inputStream = inputStream; this.metadata = metadata; } + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + } + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 5da15caaa8777..72a7df6ecc189 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -741,7 +741,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, - RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS + RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA ) ) ); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 3636521f181d4..a7be456e3996f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -763,6 +763,7 @@ public static IndexMergePolicy fromString(String text) { private final boolean widenIndexSortType; 
private final boolean assignedOnRemoteNode; private final RemoteStorePathStrategy remoteStorePathStrategy; + private final boolean isTranslogMetadataEnabled; /** * The maximum age of a retention lease before it is considered expired. @@ -988,6 +989,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); remoteStorePathStrategy = RemoteStoreUtils.determineRemoteStorePathStrategy(indexMetadata); + isTranslogMetadataEnabled = RemoteStoreUtils.determineTranslogMetadataEnabled(indexMetadata); + setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1911,4 +1914,8 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo public RemoteStorePathStrategy getRemoteStorePathStrategy() { return remoteStorePathStrategy; } + + public boolean isTranslogMetadataEnabled() { + return isTranslogMetadataEnabled; + } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java index 761fa20ea64e5..cc51fcd2f18f6 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdater.java @@ -28,7 +28,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStorePathStrategyDuringMigration; +import static org.opensearch.index.remote.RemoteStoreUtils.determineRemoteStoreCustomMetadataDuringMigration; import static org.opensearch.index.remote.RemoteStoreUtils.getRemoteStoreRepoName; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -118,7 +118,7 @@ private boolean needsRemoteIndexSettingsUpdate( } /** - * Updates the remote store path strategy metadata for the index when it is migrating to remote. + * Updates the remote store custom metadata for the index when it is migrating to remote. 
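+ * With this change, that covers both the path strategy and the translog metadata flag.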
* This is run during state change of each shard copy when the cluster is in `MIXED` mode and the direction of migration is `REMOTE_STORE` * Should not interfere with docrep functionality even if the index is in docrep nodes since this metadata * is not used anywhere in the docrep flow @@ -127,20 +127,20 @@ private boolean needsRemoteIndexSettingsUpdate( * @param indexMetadataBuilder Mutated {@link IndexMetadata.Builder} having the previous state updates * @param index index name */ - public void maybeUpdateRemoteStorePathStrategy(IndexMetadata.Builder indexMetadataBuilder, String index) { - if (indexHasRemotePathMetadata(indexMetadata) == false) { - logger.info("Adding remote store path strategy for index [{}] during migration", index); + public void maybeUpdateRemoteStoreCustomMetadata(IndexMetadata.Builder indexMetadataBuilder, String index) { + if (indexHasRemoteCustomMetadata(indexMetadata) == false) { + logger.info("Adding remote store custom data for index [{}] during migration", index); indexMetadataBuilder.putCustom( REMOTE_STORE_CUSTOM_KEY, - determineRemoteStorePathStrategyDuringMigration(clusterSettings, discoveryNodes) + determineRemoteStoreCustomMetadataDuringMigration(clusterSettings, discoveryNodes) ); } else { - logger.debug("Index {} already has remote store path strategy", index); + logger.debug("Index {} already has remote store custom data", index); } } public static boolean indexHasAllRemoteStoreRelatedMetadata(IndexMetadata indexMetadata) { - return indexHasRemoteStoreSettings(indexMetadata.getSettings()) && indexHasRemotePathMetadata(indexMetadata); + return indexHasRemoteStoreSettings(indexMetadata.getSettings()) && indexHasRemoteCustomMetadata(indexMetadata); } /** @@ -167,9 +167,11 @@ public static boolean indexHasRemoteStoreSettings(Settings indexSettings) { * @param indexMetadata Current index metadata * @return true if all above conditions match. false otherwise */ - public static boolean indexHasRemotePathMetadata(IndexMetadata indexMetadata) { + public static boolean indexHasRemoteCustomMetadata(IndexMetadata indexMetadata) { Map customMetadata = indexMetadata.getCustomData(REMOTE_STORE_CUSTOM_KEY); - return Objects.nonNull(customMetadata) && Objects.nonNull(customMetadata.get(PathType.NAME)); + return Objects.nonNull(customMetadata) + && (Objects.nonNull(customMetadata.get(PathType.NAME)) + || Objects.nonNull(customMetadata.get(IndexMetadata.TRANSLOG_METADATA_KEY))); } public static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, String segmentRepository, String translogRepository) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java new file mode 100644 index 0000000000000..e8a0dda5a699e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolver.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.function.Supplier; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; + +/** + * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. + * + * @opensearch.internal + */ +@ExperimentalApi +public class RemoteStoreCustomMetadataResolver { + + private final RemoteStoreSettings remoteStoreSettings; + private final Supplier minNodeVersionSupplier; + private final Supplier repositoriesServiceSupplier; + private final Settings settings; + + public RemoteStoreCustomMetadataResolver( + RemoteStoreSettings remoteStoreSettings, + Supplier minNodeVersionSupplier, + Supplier repositoriesServiceSupplier, + Settings settings + ) { + this.remoteStoreSettings = remoteStoreSettings; + this.minNodeVersionSupplier = minNodeVersionSupplier; + this.repositoriesServiceSupplier = repositoriesServiceSupplier; + this.settings = settings; + } + + public RemoteStorePathStrategy getPathStrategy() { + PathType pathType; + PathHashAlgorithm pathHashAlgorithm; + // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. + pathType = Version.V_2_14_0.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; + // If the path type is fixed, hash algorithm is not applicable. + pathHashAlgorithm = pathType == PathType.FIXED ? null : remoteStoreSettings.getPathHashAlgorithm(); + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + } + + public boolean isTranslogMetadataEnabled() { + Repository repository; + try { + repository = repositoriesServiceSupplier.get().repository(getRemoteStoreTranslogRepo(settings)); + } catch (RepositoryMissingException ex) { + throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", ex); + } + BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + return Version.V_2_15_0.compareTo(minNodeVersionSupplier.get()) <= 0 + && remoteStoreSettings.isTranslogMetadataEnabled() + && blobStoreRepository.blobStore().isBlobMetadataEnabled(); + } + +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java deleted file mode 100644 index 178de406ed681..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.remote; - -import org.opensearch.Version; -import org.opensearch.common.annotation.ExperimentalApi; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.RemoteStoreSettings; - -import java.util.function.Supplier; - -/** - * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. - * - * @opensearch.internal - */ -@ExperimentalApi -public class RemoteStorePathStrategyResolver { - - private final RemoteStoreSettings remoteStoreSettings; - private final Supplier minNodeVersionSupplier; - - public RemoteStorePathStrategyResolver(RemoteStoreSettings remoteStoreSettings, Supplier minNodeVersionSupplier) { - this.remoteStoreSettings = remoteStoreSettings; - this.minNodeVersionSupplier = minNodeVersionSupplier; - } - - public RemoteStorePathStrategy get() { - PathType pathType; - PathHashAlgorithm pathHashAlgorithm; - // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. - pathType = Version.V_2_14_0.compareTo(minNodeVersionSupplier.get()) <= 0 ? remoteStoreSettings.getPathType() : PathType.FIXED; - // If the path type is fixed, hash algorithm is not applicable. - pathHashAlgorithm = pathType == PathType.FIXED ? null : remoteStoreSettings.getPathHashAlgorithm(); - return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 27b1b88034573..9a9de6c819424 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -32,6 +32,7 @@ import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; /** * Utils for remote store @@ -181,25 +182,50 @@ public static RemoteStorePathStrategy determineRemoteStorePathStrategy(IndexMeta return new RemoteStorePathStrategy(RemoteStoreEnums.PathType.FIXED); } + /** + * Determines if translog file object metadata can be used to store checkpoint file data. 
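+     * For example, index custom data {"translog_metadata" -> "true"} under the "remote_store"
+     * custom key yields true; when the key is absent this returns false.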
+ */ + public static boolean determineTranslogMetadataEnabled(IndexMetadata indexMetadata) { + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + assert remoteCustomData == null || remoteCustomData.containsKey(IndexMetadata.TRANSLOG_METADATA_KEY); + if (remoteCustomData != null && remoteCustomData.containsKey(IndexMetadata.TRANSLOG_METADATA_KEY)) { + return Boolean.parseBoolean(remoteCustomData.get(IndexMetadata.TRANSLOG_METADATA_KEY)); + } + return false; + } + /** * Generates the remote store path type information to be added to custom data of index metadata during migration * * @param clusterSettings Current Cluster settings from {@link ClusterState} - * @param discoveryNodes Current {@link DiscoveryNodes} from the cluster state + * @param discoveryNodes Current {@link DiscoveryNodes} from the cluster state * @return {@link Map} to be added as custom data in index metadata */ - public static Map determineRemoteStorePathStrategyDuringMigration( + public static Map determineRemoteStoreCustomMetadataDuringMigration( Settings clusterSettings, DiscoveryNodes discoveryNodes ) { + Map remoteCustomData = new HashMap<>(); Version minNodeVersion = discoveryNodes.getMinNodeVersion(); + + // TODO: During the document replication to a remote store migration, there should be a check to determine if the registered + // translog blobstore supports custom metadata or not. + // Currently, the blobStoreMetadataEnabled flag is set to false because the integration tests run on the local file system, which + // does not support custom metadata. + // https://github.com/opensearch-project/OpenSearch/issues/13745 + boolean blobStoreMetadataEnabled = false; + boolean translogMetadata = Version.CURRENT.compareTo(minNodeVersion) <= 0 + && CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.get(clusterSettings) + && blobStoreMetadataEnabled; + + remoteCustomData.put(IndexMetadata.TRANSLOG_METADATA_KEY, Boolean.toString(translogMetadata)); + RemoteStoreEnums.PathType pathType = Version.CURRENT.compareTo(minNodeVersion) <= 0 ? CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.get(clusterSettings) : RemoteStoreEnums.PathType.FIXED; RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm = pathType == RemoteStoreEnums.PathType.FIXED ? 
null : CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.get(clusterSettings); - Map remoteCustomData = new HashMap<>(); remoteCustomData.put(RemoteStoreEnums.PathType.NAME, pathType.name()); if (Objects.nonNull(pathHashAlgorithm)) { remoteCustomData.put(RemoteStoreEnums.PathHashAlgorithm.NAME, pathHashAlgorithm.name()); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 81663016409e4..fbec820882a51 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4989,7 +4989,14 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings); + RemoteFsTranslog.cleanup( + repository, + shardId, + getThreadPool(), + indexSettings.getRemoteStorePathStrategy(), + remoteStoreSettings, + indexSettings().isTranslogMetadataEnabled() + ); } /* @@ -5014,7 +5021,8 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings, logger, - shouldSeedRemoteStore() + shouldSeedRemoteStore(), + indexSettings().isTranslogMetadataEnabled() ); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index c7f756957076c..67549c86b7dd2 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -91,6 +91,7 @@ public class RemoteFsTranslog extends Translog { private static final int SYNC_PERMIT = 1; private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT); private final AtomicBoolean pauseSync = new AtomicBoolean(false); + private final boolean isTranslogMetadataEnabled; public RemoteFsTranslog( TranslogConfig config, @@ -110,6 +111,7 @@ public RemoteFsTranslog( this.startedPrimarySupplier = startedPrimarySupplier; this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); + isTranslogMetadataEnabled = indexSettings().isTranslogMetadataEnabled(); this.translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, @@ -117,7 +119,8 @@ public RemoteFsTranslog( fileTransferTracker, remoteTranslogTransferTracker, indexSettings().getRemoteStorePathStrategy(), - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); try { download(translogTransferManager, location, logger, config.shouldSeedRemote()); @@ -169,7 +172,8 @@ public static void download( RemoteStorePathStrategy pathStrategy, RemoteStoreSettings remoteStoreSettings, Logger logger, - boolean seedRemote + boolean seedRemote, + boolean isTranslogMetadataEnabled ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( Locale.ROOT, @@ -188,7 +192,8 @@ public static void download( fileTransferTracker, remoteTranslogTransferTracker, pathStrategy, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); 
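        // Sketch of how the new flag is threaded through, per the wiring in this patch:
        // IndexSettings.isTranslogMetadataEnabled() -> RemoteFsTranslog (download/cleanup)
        // -> buildTranslogTransferManager(...) -> TranslogTransferManager(isTranslogMetadataEnabled).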
RemoteFsTranslog.download(translogTransferManager, location, logger, seedRemote); logger.trace(remoteTranslogTransferTracker.toString()); @@ -293,7 +298,8 @@ public static TranslogTransferManager buildTranslogTransferManager( FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker tracker, RemoteStorePathStrategy pathStrategy, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) { assert Objects.nonNull(pathStrategy); String indexUUID = shardId.getIndex().getUUID(); @@ -315,7 +321,16 @@ public static TranslogTransferManager buildTranslogTransferManager( .build(); BlobPath mdPath = pathStrategy.generatePath(mdPathInput); BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); - return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker, remoteStoreSettings); + return new TranslogTransferManager( + shardId, + transferService, + dataPath, + mdPath, + fileTransferTracker, + tracker, + remoteStoreSettings, + isTranslogMetadataEnabled + ); } @Override @@ -592,7 +607,8 @@ public static void cleanup( ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; @@ -607,7 +623,8 @@ public static void cleanup( fileTransferTracker, remoteTranslogTransferTracker, pathStrategy, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index bec2d78d9af62..704f6419da60a 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -18,7 +18,7 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; @@ -28,16 +28,20 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.threadpool.ThreadPool; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.nio.channels.FileChannel; import java.nio.file.StandardOpenOption; +import java.util.Base64; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; /** * Service that handles remote transfer of translog and checkpoint files @@ 
-104,6 +108,30 @@ public void uploadBlobs( } + // Builds a metadata map containing the Base64-encoded checkpoint file data associated with a translog file. + static Map buildTransferFileMetadata(InputStream metadataInputStream) throws IOException { + Map metadata = new HashMap<>(); + try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + byte[] buffer = new byte[128]; + int bytesRead; + int totalBytesRead = 0; + + while ((bytesRead = metadataInputStream.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + totalBytesRead += bytesRead; + if (totalBytesRead > 1024) { + // We enforce a limit of 1KB on the size of the checkpoint file. + throw new IOException("Input stream exceeds 1KB limit"); + } + } + + byte[] bytes = byteArrayOutputStream.toByteArray(); + String metadataString = Base64.getEncoder().encodeToString(bytes); + metadata.put(CHECKPOINT_FILE_DATA_KEY, metadataString); + } + return metadata; + } + private void uploadBlob( TransferFileSnapshot fileSnapshot, ActionListener listener, @@ -113,6 +141,11 @@ private void uploadBlob( try { ChannelFactory channelFactory = FileChannel::open; + Map metadata = null; + if (fileSnapshot.getMetadataFileInputStream() != null) { + metadata = buildTransferFileMetadata(fileSnapshot.getMetadataFileInputStream()); + } + long contentLength; try (FileChannel channel = channelFactory.open(fileSnapshot.getPath(), StandardOpenOption.READ)) { contentLength = channel.size(); @@ -130,7 +163,8 @@ private void uploadBlob( writePriority, (size, position) -> new OffsetRangeFileInputStream(fileSnapshot.getPath(), size, position), Objects.requireNonNull(fileSnapshot.getChecksum()), - remoteIntegrityEnabled + remoteIntegrityEnabled, + metadata ); ActionListener completionListener = ActionListener.wrap(resp -> listener.onResponse(fileSnapshot), ex -> { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), ex); @@ -168,7 +202,8 @@ public InputStream downloadBlob(Iterable path, String fileName) throws I @Override @ExperimentalApi - public FetchBlobResult downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + public InputStreamWithMetadata downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + assert blobStore.isBlobMetadataEnabled(); return blobStore.blobContainer((BlobPath) path).readBlobWithMetadata(fileName); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java index dcec94edd694f..86f042af0584b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java @@ -108,6 +108,8 @@ public static class TransferFileSnapshot extends FileSnapshot { private final long primaryTerm; private Long checksum; + @Nullable + private InputStream metadataFileInputStream; public TransferFileSnapshot(Path path, long primaryTerm, Long checksum) throws IOException { super(path); @@ -128,6 +130,14 @@ public long getPrimaryTerm() { return primaryTerm; } + public void setMetadataFileInputStream(InputStream inputStream) { + this.metadataFileInputStream = inputStream; + } + + public InputStream getMetadataFileInputStream() { + return metadataFileInputStream; + } + @Override public int hashCode() { return Objects.hash(primaryTerm, super.hashCode()); diff --git 
a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index 0894ebf500ebd..0ff983739438b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -11,7 +11,7 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.FetchBlobResult; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.core.action.ActionListener; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -131,11 +131,11 @@ void uploadBlobs( * * @param path the remote path from where download should be made * @param fileName the name of the file - * @return {@link FetchBlobResult} of the remote file + * @return {@link InputStreamWithMetadata} of the remote file * @throws IOException the exception while reading the data */ @ExperimentalApi - FetchBlobResult downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; + InputStreamWithMetadata downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; void listAllInSortedOrder(Iterable path, String filenamePrefix, int limit, ActionListener> listener); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java index ef34fd31a296b..6dcdc8f8cf44a 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java @@ -12,6 +12,7 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; +import java.io.IOException; import java.util.Set; /** @@ -39,4 +40,10 @@ public interface TransferSnapshot { * @return the translog transfer metadata */ TranslogTransferMetadata getTranslogTransferMetadata(); + + /** + * The snapshot of the translog generational files having checkpoint file inputStream as metadata + * @return the set of translog files having checkpoint file inputStream as metadata. 
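+     * Implementations are expected to attach each generation's checkpoint file stream to the
+     * matching translog file snapshot before returning the set (see the
+     * TranslogCheckpointTransferSnapshot implementation below).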
+ */ + Set getTranslogFileSnapshotWithMetadata() throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index fb78731246a07..ae007c0c33e1e 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -64,6 +64,16 @@ public Set getTranslogFileSnapshots() { return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v1).collect(Collectors.toSet()); } + @Override + public Set getTranslogFileSnapshotWithMetadata() throws IOException { + for (Tuple tuple : translogCheckpointFileInfoTupleSet) { + TransferFileSnapshot translogFileSnapshot = tuple.v1(); + TransferFileSnapshot checkpointFileSnapshot = tuple.v2(); + translogFileSnapshot.setMetadataFileInputStream(checkpointFileSnapshot.inputStream()); + } + return getTranslogFileSnapshots(); + } + @Override public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata( diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 47638f44fd6fc..1cc39cdf442e2 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -16,6 +16,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -36,6 +37,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Base64; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -63,6 +65,9 @@ public class TranslogTransferManager { private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private final RemoteStoreSettings remoteStoreSettings; private static final int METADATA_FILES_TO_FETCH = 10; + // Flag to include checkpoint file data as translog file metadata during upload/download + private final boolean isTranslogMetadataEnabled; + final static String CHECKPOINT_FILE_DATA_KEY = "ckp-data"; private final Logger logger; @@ -79,7 +84,8 @@ public TranslogTransferManager( BlobPath remoteMetadataTransferPath, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker remoteTranslogTransferTracker, - RemoteStoreSettings remoteStoreSettings + RemoteStoreSettings remoteStoreSettings, + boolean isTranslogMetadataEnabled ) { this.shardId = shardId; this.transferService = transferService; @@ -89,6 +95,7 @@ public TranslogTransferManager( this.logger = Loggers.getLogger(getClass(), shardId); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; this.remoteStoreSettings = remoteStoreSettings; + this.isTranslogMetadataEnabled = isTranslogMetadataEnabled; } public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker() { @@ -110,8 +117,12 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans 
long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis(); try { - toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); - toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); + if (isTranslogMetadataEnabled) { + toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshotWithMetadata())); + } else { + toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); + toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); + } if (toUpload.isEmpty()) { logger.trace("Nothing to upload for transfer"); return true; @@ -236,30 +247,78 @@ public boolean downloadTranslog(String primaryTerm, String generation, Path loca generation, location ); - // Download Checkpoint file from remote to local FS String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation)); - downloadToFS(ckpFileName, location, primaryTerm); - // Download translog file from remote to local FS String translogFilename = Translog.getFilename(Long.parseLong(generation)); - downloadToFS(translogFilename, location, primaryTerm); + if (isTranslogMetadataEnabled == false) { + // Download Checkpoint file, translog file from remote to local FS + downloadToFS(ckpFileName, location, primaryTerm, false); + downloadToFS(translogFilename, location, primaryTerm, false); + } else { + // Download translog.tlog file with object metadata from remote to local FS + Map<String, String> metadata = downloadToFS(translogFilename, location, primaryTerm, true); + try { + assert metadata != null && !metadata.isEmpty() && metadata.containsKey(CHECKPOINT_FILE_DATA_KEY); + recoverCkpFileUsingMetadata(metadata, location, generation, translogFilename); + } catch (Exception e) { + throw new IOException("Failed to recover checkpoint file from remote", e); + } + } return true; } - private void downloadToFS(String fileName, Path location, String primaryTerm) throws IOException { + /** + * Processes the provided metadata and tries to recover the translog.ckp file to the local FS. + */ + private void recoverCkpFileUsingMetadata(Map<String, String> metadata, Path location, String generation, String fileName) + throws IOException { + + String ckpFileName = Translog.getCommitCheckpointFileName(Long.parseLong(generation)); + Path filePath = location.resolve(ckpFileName); + // Here, we always override the existing file if present. + deleteFileIfExists(filePath); + + String ckpDataBase64 = metadata.get(CHECKPOINT_FILE_DATA_KEY); + if (ckpDataBase64 == null) { + logger.error("Error processing metadata for translog file: {}", fileName); + throw new IllegalStateException( + "Checkpoint file data key " + CHECKPOINT_FILE_DATA_KEY + " is expected but not found in metadata for file: " + fileName + ); + } + byte[] ckpFileBytes = Base64.getDecoder().decode(ckpDataBase64); + Files.write(filePath, ckpFileBytes); + } + + private Map<String, String> downloadToFS(String fileName, Path location, String primaryTerm, boolean withMetadata) throws IOException { Path filePath = location.resolve(fileName); // Here, we always override the existing file if present. 
// We need to change this logic when we introduce incremental download - if (Files.exists(filePath)) { - Files.delete(filePath); - } + deleteFileIfExists(filePath); + Map<String, String> metadata = null; boolean downloadStatus = false; long bytesToRead = 0, downloadStartTime = System.nanoTime(); - try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { - // Capture number of bytes for stats before reading - bytesToRead = inputStream.available(); - Files.copy(inputStream, filePath); - downloadStatus = true; + try { + if (withMetadata) { + try ( + InputStreamWithMetadata inputStreamWithMetadata = transferService.downloadBlobWithMetadata( + remoteDataTransferPath.add(primaryTerm), + fileName + ) + ) { + InputStream inputStream = inputStreamWithMetadata.getInputStream(); + metadata = inputStreamWithMetadata.getMetadata(); + + bytesToRead = inputStream.available(); + Files.copy(inputStream, filePath); + downloadStatus = true; + } + } else { + try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { + bytesToRead = inputStream.available(); + Files.copy(inputStream, filePath); + downloadStatus = true; + } + } } finally { remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); if (downloadStatus) { @@ -269,6 +328,13 @@ private void downloadToFS(String fileName, Path location, String primaryTerm) th // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync fileTransferTracker.add(fileName, true); + return metadata; + } + + private void deleteFileIfExists(Path filePath) throws IOException { + if (Files.exists(filePath)) { + Files.delete(filePath); + } } public TranslogTransferMetadata readMetadata() throws IOException { @@ -391,7 +457,11 @@ public void deleteGenerationAsync(long primaryTerm, Set generations, Runna // Add .ckp and .tlog file to translog file list which is located in basePath/ String ckpFileName = Translog.getCommitCheckpointFileName(generation); String translogFileName = Translog.getFilename(generation); - translogFiles.addAll(List.of(ckpFileName, translogFileName)); + if (isTranslogMetadataEnabled == false) { + translogFiles.addAll(List.of(ckpFileName, translogFileName)); + } else { + translogFiles.add(translogFileName); + } }); // Delete the translog and checkpoint files asynchronously deleteTranslogFilesAsync(primaryTerm, translogFiles, onCompletion); diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 2e45888e099e7..836af2a14637f 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -80,6 +80,18 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * This setting is used to disable uploading the translog.ckp file as object metadata on the translog.tlog file. It is effective + * only for repositories that support reading and writing blobs with object metadata, and applies only to remote-store-enabled + * clusters. + */ + @ExperimentalApi + public static final Setting<Boolean> CLUSTER_REMOTE_STORE_TRANSLOG_METADATA = Setting.boolSetting( + "cluster.remote_store.index.translog.translog_metadata", + true, + Property.NodeScope, + Property.Dynamic + ); + /** * This setting is used to set the remote store blob store path hash algorithm strategy. 
This setting is effective only for * remote store enabled cluster. This setting will come to effect if the {@link #CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING} @@ -111,6 +123,7 @@ public class RemoteStoreSettings { private volatile RemoteStoreEnums.PathType pathType; private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; + private volatile boolean isTranslogMetadataEnabled; public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); @@ -134,6 +147,9 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { pathType = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, this::setPathType); + isTranslogMetadataEnabled = clusterSettings.get(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, this::setTranslogMetadataEnabled); + pathHashAlgorithm = clusterSettings.get(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, this::setPathHashAlgorithm); @@ -180,6 +196,14 @@ private void setPathType(RemoteStoreEnums.PathType pathType) { this.pathType = pathType; } + private void setTranslogMetadataEnabled(boolean isTranslogMetadataEnabled) { + this.isTranslogMetadataEnabled = isTranslogMetadataEnabled; + } + + public boolean isTranslogMetadataEnabled() { + return isTranslogMetadataEnabled; + } + private void setPathHashAlgorithm(RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm) { this.pathHashAlgorithm = pathHashAlgorithm; } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 6f90ebd8ad188..4a702ec0234ef 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -888,7 +888,8 @@ protected Node( systemIndices, forbidPrivateIndexSettings, awarenessReplicaBalance, - remoteStoreSettings + remoteStoreSettings, + repositoriesServiceReference::get ); pluginsService.filterPlugins(Plugin.class) .forEach( diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 0ac28ff4e6d8e..6884cbdf753ae 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -484,7 +484,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - createIndexService.addRemoteStorePathStrategyInCustomData(indexMdBuilder, false); + createIndexService.addRemoteStoreCustomMetadata(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 2fefbed6b2b64..88dac571ba861 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ 
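Because cluster.remote_store.index.translog.translog_metadata is registered as a Dynamic, NodeScope setting and RemoteStoreSettings wires an update consumer for it, the flag can be flipped at runtime. Below is a test-style sketch of that behavior, mirroring the applySettings pattern used by the resolver tests later in this patch; it assumes, as those tests imply, that the setting is registered in ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.

// Construct RemoteStoreSettings with the flag enabled.
Settings initial = Settings.builder()
    .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), true)
    .build();
ClusterSettings clusterSettings = new ClusterSettings(initial, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(initial, clusterSettings);
assert remoteStoreSettings.isTranslogMetadataEnabled();

// A dynamic settings update invokes the registered consumer, which flips the cached flag.
clusterSettings.applySettings(
    Settings.builder().put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), false).build()
);
assert remoteStoreSettings.isTranslogMetadataEnabled() == false;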
b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -740,7 +740,8 @@ public void testRolloverClusterState() throws Exception { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -879,7 +880,8 @@ public void testRolloverClusterStateForDataStream() throws Exception { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, @@ -1058,7 +1060,8 @@ public void testRolloverClusterStateForDataStreamNoTemplate() throws Exception { new SystemIndices(emptyMap()), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index ac8fcfa487b22..de1d422d6016b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -57,6 +57,7 @@ import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -88,6 +89,8 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; @@ -117,6 +120,7 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -157,6 +161,7 @@ import static org.opensearch.node.Node.NODE_ATTRIBUTES; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.hamcrest.Matchers.containsString; @@ -177,6 +182,9 @@ public class 
MetadataCreateIndexServiceTests extends OpenSearchTestCase { private CreateIndexClusterStateUpdateRequest request; private QueryShardContext queryShardContext; private ClusterSettings clusterSettings; + private IndicesService indicesServices; + private RepositoriesService repositoriesService; + private Supplier repositoriesServiceSupplier; private static final String segmentRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() @@ -189,6 +197,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { public void setup() throws Exception { super.setUp(); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + indicesServices = mock(IndicesService.class); + repositoriesServiceSupplier = mock(Supplier.class); + repositoriesService = mock(RepositoriesService.class); } @Before @@ -703,7 +714,7 @@ public void testValidateIndexName() throws Exception { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -714,7 +725,8 @@ public void testValidateIndexName() throws Exception { new SystemIndices(Collections.emptyMap()), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); validateIndexName( checkerService, @@ -790,7 +802,7 @@ public void testValidateDotIndex() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -801,7 +813,8 @@ public void testValidateDotIndex() { new SystemIndices(Collections.singletonMap("foo", systemIndexDescriptors)), false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); // Check deprecations assertFalse(checkerService.validateDotIndex(".test2", false)); @@ -1216,7 +1229,7 @@ public void testvalidateIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( settings, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1227,7 +1240,8 @@ public void testvalidateIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); List validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); @@ -1336,7 +1350,7 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { final MetadataCreateIndexService checkerService = new MetadataCreateIndexService( forceClusterSettingEnabled, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1347,7 +1361,8 @@ public void testClusterForceReplicationTypeInValidateIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new 
AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); // Use DOCUMENT replication type setting for index creation final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); @@ -1462,7 +1477,7 @@ public void testRemoteStoreDisabledByUserIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1473,7 +1488,8 @@ public void testRemoteStoreDisabledByUserIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1497,7 +1513,7 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1508,7 +1524,8 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1537,7 +1554,7 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1548,7 +1565,8 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors( @@ -1755,10 +1773,16 @@ private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); ThreadPool threadPool = new TestThreadPool(getTestName()); + BlobStoreRepository repositoryMock = mock(BlobStoreRepository.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + when(repositoriesService.repository(getRemoteStoreTranslogRepo(settings))).thenReturn(repositoryMock); + BlobStore blobStoreMock = mock(BlobStore.class); + when(repositoryMock.blobStore()).thenReturn(blobStoreMock); + when(blobStoreMock.isBlobMetadataEnabled()).thenReturn(randomBoolean()); MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( settings, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1769,7 +1793,8 @@ private IndexMetadata testRemoteCustomData(boolean 
remoteStoreEnabled, PathType new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(settings, clusterService.getClusterSettings()), - remoteStoreSettings + remoteStoreSettings, + repositoriesServiceSupplier ); CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = Settings.builder() @@ -1882,7 +1907,7 @@ public void testIndexLifecycleNameSetting() { MetadataCreateIndexService checkerService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), @@ -1893,7 +1918,8 @@ public void testIndexLifecycleNameSetting() { new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier ); final List validationErrors = checkerService.getIndexSettingsValidationErrors(ilnSetting, true, Optional.empty()); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 2cb345144b220..b64cf0ed172af 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -56,8 +56,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndexTemplateMissingException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexTemplateException; import org.opensearch.indices.SystemIndices; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; @@ -2038,10 +2040,13 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr when(clusterService.state()).thenReturn(clusterState); when(clusterService.getSettings()).thenReturn(settings); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + IndicesService indicesServices = mock(IndicesService.class); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + // when(indicesServices.getRepositoriesServiceSupplier()).thenReturn(() -> repositoriesService); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, - null, + indicesServices, null, null, createTestShardLimitService(randomIntBetween(1, 1000), false), @@ -2052,7 +2057,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr new SystemIndices(Collections.emptyMap()), true, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); MetadataIndexTemplateService service = new MetadataIndexTemplateService( clusterService, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java index d8220c93e4eeb..a2cd3f6b803e0 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java +++ 
b/server/src/test/java/org/opensearch/index/remote/RemoteMigrationIndexMetadataUpdaterTests.java @@ -163,7 +163,7 @@ public void testMaybeUpdateRemoteStorePathStrategyExecutes() { .build(), logger ); - migrationIndexMetadataUpdater.maybeUpdateRemoteStorePathStrategy(builder, indexName); + migrationIndexMetadataUpdater.maybeUpdateRemoteStoreCustomMetadata(builder, indexName); assertCustomPathMetadataIsPresent(builder.build()); } @@ -186,7 +186,7 @@ public void testMaybeUpdateRemoteStorePathStrategyDoesNotExecute() { logger ); - migrationIndexMetadataUpdater.maybeUpdateRemoteStorePathStrategy(builder, indexName); + migrationIndexMetadataUpdater.maybeUpdateRemoteStoreCustomMetadata(builder, indexName); assertCustomPathMetadataIsPresent(builder.build()); } @@ -298,7 +298,14 @@ public static Metadata createIndexMetadataWithRemoteStoreSettings(String indexNa ) .putCustom( REMOTE_STORE_CUSTOM_KEY, - Map.of(RemoteStoreEnums.PathType.NAME, "dummy", RemoteStoreEnums.PathHashAlgorithm.NAME, "dummy") + Map.of( + RemoteStoreEnums.PathType.NAME, + "dummy", + RemoteStoreEnums.PathHashAlgorithm.NAME, + "dummy", + IndexMetadata.TRANSLOG_METADATA_KEY, + "dummy" + ) ) .build(); return Metadata.builder().put(indexMetadata).build(); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java new file mode 100644 index 0000000000000..7e702ad3773e8 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreCustomMetadataResolverTests.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteStoreCustomMetadataResolverTests extends OpenSearchTestCase { + + RepositoriesService repositoriesService = mock(RepositoriesService.class); + Settings settings = Settings.EMPTY; + + public void testGetPathStrategyMinVersionOlder() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_13_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testGetPathStrategyMinVersionNewer() { + PathType pathType = randomFrom(PathType.values()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(pathType, resolver.getPathStrategy().getType()); + if (pathType.requiresHashAlgorithm()) { + assertNotNull(resolver.getPathStrategy().getHashAlgorithm()); + } else { + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + } + } + + public void testGetPathStrategyStrategy() { + // FIXED type + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + + // FIXED type with hash algorithm + settings = 
Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_COMPOSITE + settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + + // HASHED_PREFIX type with FNV_1A_BASE64 + settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + resolver = new RemoteStoreCustomMetadataResolver(remoteStoreSettings, () -> Version.V_2_14_0, () -> repositoriesService, settings); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testGetPathStrategyStrategyWithDynamicUpdate() { + + // Default value + Settings settings = Settings.builder().build(); + ClusterSettings clusterSettings = new 
ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertEquals(PathType.FIXED, resolver.getPathStrategy().getType()); + assertNull(resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_PREFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_PREFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_INFIX with default hash algorithm + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.getPathStrategy().getHashAlgorithm()); + + // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm + clusterSettings.applySettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) + .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) + .build() + ); + assertEquals(PathType.HASHED_INFIX, resolver.getPathStrategy().getType()); + assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.getPathStrategy().getHashAlgorithm()); + } + + public void testTranslogMetadataAllowedTrueWithMinVersionNewer() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), true).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + BlobStoreRepository repositoryMock = mock(BlobStoreRepository.class); + when(repositoriesService.repository(getRemoteStoreTranslogRepo(settings))).thenReturn(repositoryMock); + BlobStore blobStoreMock = mock(BlobStore.class); + when(repositoryMock.blobStore()).thenReturn(blobStoreMock); + when(blobStoreMock.isBlobMetadataEnabled()).thenReturn(true); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.CURRENT, + () -> repositoriesService, + settings + ); + assertTrue(resolver.isTranslogMetadataEnabled()); + } + + public void testTranslogMetadataAllowedFalseWithMinVersionNewer() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), false).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); 
+ RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.CURRENT, + () -> repositoriesService, + settings + ); + assertFalse(resolver.isTranslogMetadataEnabled()); + } + + public void testTranslogMetadataAllowedMinVersionOlder() { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); + RemoteStoreCustomMetadataResolver resolver = new RemoteStoreCustomMetadataResolver( + remoteStoreSettings, + () -> Version.V_2_14_0, + () -> repositoriesService, + settings + ); + assertFalse(resolver.isTranslogMetadataEnabled()); + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java deleted file mode 100644 index de61c902bf13e..0000000000000 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.Version; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; -import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.test.OpenSearchTestCase; - -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING; -import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING; - -public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { - - public void testGetMinVersionOlder() { - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_13_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - } - - public void testGetMinVersionNewer() { - PathType pathType = randomFrom(PathType.values()); - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), pathType).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(pathType, resolver.get().getType()); - if (pathType.requiresHashAlgorithm()) { - assertNotNull(resolver.get().getHashAlgorithm()); - } else { - assertNull(resolver.get().getHashAlgorithm()); - } - } - - 
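Read together, the RemoteStoreCustomMetadataResolverTests above imply that the resolver reports translog metadata as enabled only when three independent gates all pass: a minimum node version (newer than 2.14; the exact constant is not visible in this hunk), the cluster setting, and the translog repository's blob store advertising metadata support. A hypothetical distillation of that predicate, not the actual implementation:

// Inferred from the tests above; illustrative only.
static boolean translogMetadataEnabled(
    boolean versionSupportsIt,          // minimum version newer than 2.14
    boolean clusterSettingEnabled,      // cluster.remote_store.index.translog.translog_metadata
    boolean repoSupportsBlobMetadata    // translog repository's blobStore().isBlobMetadataEnabled()
) {
    return versionSupportsIt && clusterSettingEnabled && repoSupportsBlobMetadata;
}

Note that the disabled-setting test never stubs the repository, which suggests the real check short-circuits on the setting before consulting the blob store.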
public void testGetStrategy() { - // FIXED type - Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED).build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // FIXED type with hash algorithm - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.FIXED) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), randomFrom(PathHashAlgorithm.values())) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_COMPOSITE - settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // HASHED_PREFIX type with FNV_1A_BASE64 - settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build(); - clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - 
assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } - - public void testGetStrategyWithDynamicUpdate() { - - // Default value - Settings settings = Settings.builder().build(); - ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, clusterSettings); - RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(remoteStoreSettings, () -> Version.V_2_14_0); - assertEquals(PathType.FIXED, resolver.get().getType()); - assertNull(resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX).build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_PREFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_PREFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with default hash algorithm - clusterSettings.applySettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX).build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_COMPOSITE_1, resolver.get().getHashAlgorithm()); - - // Set HASHED_INFIX with FNV_1A_BASE64 hash algorithm - clusterSettings.applySettings( - Settings.builder() - .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), PathType.HASHED_INFIX) - .put(CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING.getKey(), PathHashAlgorithm.FNV_1A_BASE64) - .build() - ); - assertEquals(PathType.HASHED_INFIX, resolver.get().getType()); - assertEquals(PathHashAlgorithm.FNV_1A_BASE64, resolver.get().getHashAlgorithm()); - } -} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index c1fc0cdaa0d3b..59c3d3dccdd0f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,9 +8,13 @@ package org.opensearch.index.remote; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; @@ -26,7 +30,9 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.REMOTE_STORE_CUSTOM_KEY; import static org.opensearch.index.remote.RemoteStoreUtils.URL_BASE64_CHARSET; 
+import static org.opensearch.index.remote.RemoteStoreUtils.determineTranslogMetadataEnabled; import static org.opensearch.index.remote.RemoteStoreUtils.longToCompositeBase64AndBinaryEncoding; import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.urlBase64ToLong; @@ -40,6 +46,7 @@ public class RemoteStoreUtilsTests extends OpenSearchTestCase { private static Map<Character, Integer> BASE64_CHARSET_IDX_MAP; + private static String index = "test-index"; static { Map<Character, Integer> charToIndexMap = new HashMap<>(); @@ -341,4 +348,54 @@ static long compositeUrlBase64BinaryEncodingToLong(String encodedValue) { String binaryString = base64PartBinary + encodedValue.substring(1); return new BigInteger(binaryString, 2).longValue(); } + + public void testDetermineTranslogMetadataEnabledWhenTrue() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 1); + IndexMetadata indexMetadata = metadata.index(index); + assertTrue(determineTranslogMetadataEnabled(indexMetadata)); + } + + public void testDetermineTranslogMetadataEnabledWhenFalse() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 0); + IndexMetadata indexMetadata = metadata.index(index); + assertFalse(determineTranslogMetadataEnabled(indexMetadata)); + } + + public void testDetermineTranslogMetadataEnabledWhenKeyNotFound() { + Metadata metadata = createIndexMetadataWithRemoteStoreSettings(index, 2); + IndexMetadata indexMetadata = metadata.index(index); + assertThrows(AssertionError.class, () -> determineTranslogMetadataEnabled(indexMetadata)); + } + + private static Metadata createIndexMetadataWithRemoteStoreSettings(String indexName, int option) { + IndexMetadata.Builder indexMetadata = IndexMetadata.builder(indexName); + indexMetadata.settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "dummy-tlog-repo") + .put(IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "dummy-segment-repo") + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), "SEGMENT") + .build() + ).putCustom(REMOTE_STORE_CUSTOM_KEY, getCustomDataMap(option)).build(); + return Metadata.builder().put(indexMetadata).build(); + } + + private static Map<String, String> getCustomDataMap(int option) { + if (option > 1) { + return Map.of(); + } + String value = (option == 1) ? 
"true" : "false"; + return Map.of( + RemoteStoreEnums.PathType.NAME, + "dummy", + RemoteStoreEnums.PathHashAlgorithm.NAME, + "dummy", + IndexMetadata.TRANSLOG_METADATA_KEY, + value + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java index e4f5a454b15f6..3ed90c0837663 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java @@ -26,14 +26,21 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.Base64; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; + public class BlobStoreTransferServiceTests extends OpenSearchTestCase { private ThreadPool threadPool; @@ -144,4 +151,37 @@ private Environment createEnvironment() { .build() ); } + + public void testBuildTransferFileMetadata_EmptyInputStream() throws IOException { + InputStream emptyInputStream = new ByteArrayInputStream(new byte[0]); + Map metadata = BlobStoreTransferService.buildTransferFileMetadata(emptyInputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + assertEquals("", metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + + public void testBuildTransferFileMetadata_NonEmptyInputStream() throws IOException { + String inputData = "This is a test input stream."; + InputStream inputStream = new ByteArrayInputStream(inputData.getBytes(StandardCharsets.UTF_8)); + Map metadata = BlobStoreTransferService.buildTransferFileMetadata(inputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + String expectedBase64String = Base64.getEncoder().encodeToString(inputData.getBytes(StandardCharsets.UTF_8)); + assertEquals(expectedBase64String, metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + + public void testBuildTransferFileMetadata_InputStreamExceedsLimit() { + byte[] largeData = new byte[1025]; // 1025 bytes, exceeding the 1KB limit + InputStream largeInputStream = new ByteArrayInputStream(largeData); + IOException exception = assertThrows(IOException.class, () -> BlobStoreTransferService.buildTransferFileMetadata(largeInputStream)); + assertEquals(exception.getMessage(), "Input stream exceeds 1KB limit"); + } + + public void testBuildTransferFileMetadata_SmallInputStreamOptimization() throws IOException { + String inputData = "Small input"; + InputStream inputStream = new ByteArrayInputStream(inputData.getBytes(StandardCharsets.UTF_8)); + Map metadata = BlobStoreTransferService.buildTransferFileMetadata(inputStream); + assertTrue(metadata.containsKey(CHECKPOINT_FILE_DATA_KEY)); + String expectedBase64String = Base64.getEncoder().encodeToString(inputData.getBytes(StandardCharsets.UTF_8)); + assertEquals(expectedBase64String, metadata.get(CHECKPOINT_FILE_DATA_KEY)); + } + } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java 
b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 8b3fc6651a505..c6f9838ad2d52 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -15,6 +15,7 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.InputStreamWithMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; @@ -41,9 +42,12 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.UUID; @@ -53,15 +57,17 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.translog.transfer.TranslogTransferManager.CHECKPOINT_FILE_DATA_KEY; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -81,6 +87,7 @@ public class TranslogTransferManagerTests extends OpenSearchTestCase { FileTransferTracker tracker; TranslogTransferManager translogTransferManager; long delayForBlobDownload; + boolean isTranslogMetadataEnabled; @Override public void setUp() throws Exception { @@ -97,6 +104,7 @@ public void setUp() throws Exception { tlogBytes = "Hello Translog".getBytes(StandardCharsets.UTF_8); ckpBytes = "Hello Checkpoint".getBytes(StandardCharsets.UTF_8); tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); + isTranslogMetadataEnabled = false; translogTransferManager = new TranslogTransferManager( shardId, transferService, @@ -104,7 +112,8 @@ public void setUp() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); delayForBlobDownload = 1; @@ -170,7 +179,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -222,7 +232,8 @@ public void testTransferSnapshotOnUploadTimeout() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), 
fileTransferTracker, remoteTranslogTransferTracker, - remoteStoreSettings + remoteStoreSettings, + isTranslogMetadataEnabled ); SetOnce exception = new SetOnce<>(); translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -265,7 +276,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); SetOnce exception = new SetOnce<>(); @@ -297,64 +309,66 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { uploadThread.get().interrupt(); } - private TransferSnapshot createTransferSnapshot() { - return new TransferSnapshot() { - @Override - public Set getCheckpointFileSnapshots() { - try { - return Set.of( - new CheckpointFileSnapshot( - primaryTerm, - generation, - minTranslogGeneration, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX), - null - ), - new CheckpointFileSnapshot( - primaryTerm, - generation, - minTranslogGeneration, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX), - null - ) - ); - } catch (IOException e) { - throw new AssertionError("Failed to create temp file", e); + private TransferSnapshot createTransferSnapshot() throws IOException { + try { + CheckpointFileSnapshot checkpointFileSnapshot1 = new CheckpointFileSnapshot( + primaryTerm, + generation, + minTranslogGeneration, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX), + null + ); + CheckpointFileSnapshot checkpointFileSnapshot2 = new CheckpointFileSnapshot( + primaryTerm, + generation, + minTranslogGeneration, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX), + null + ); + TranslogFileSnapshot translogFileSnapshot1 = new TranslogFileSnapshot( + primaryTerm, + generation, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX), + null + ); + TranslogFileSnapshot translogFileSnapshot2 = new TranslogFileSnapshot( + primaryTerm, + generation - 1, + createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX), + null + ); + + return new TransferSnapshot() { + @Override + public Set getCheckpointFileSnapshots() { + return Set.of(checkpointFileSnapshot1, checkpointFileSnapshot2); } - } - @Override - public Set getTranslogFileSnapshots() { - try { - return Set.of( - new TranslogFileSnapshot( - primaryTerm, - generation, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX), - null - ), - new TranslogFileSnapshot( - primaryTerm, - generation - 1, - createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX), - null - ) - ); - } catch (IOException e) { - throw new AssertionError("Failed to create temp file", e); + @Override + public Set getTranslogFileSnapshots() { + return Set.of(translogFileSnapshot1, translogFileSnapshot2); } - } - @Override - public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); - } + @Override + public TranslogTransferMetadata getTranslogTransferMetadata() { + return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); + } - @Override - public String toString() 
{ - return "test-to-string"; - } - }; + @Override + public Set getTranslogFileSnapshotWithMetadata() throws IOException { + translogFileSnapshot1.setMetadataFileInputStream(checkpointFileSnapshot1.inputStream()); + translogFileSnapshot2.setMetadataFileInputStream(checkpointFileSnapshot2.inputStream()); + return Set.of(translogFileSnapshot1, translogFileSnapshot2); + } + + @Override + public String toString() { + return "test-to-string"; + } + }; + } catch (Exception e) { + throw new IOException("Failed to create transfer snapshot"); + } } public void testReadMetadataNoFile() throws IOException { @@ -502,7 +516,8 @@ public void testDeleteTranslogSuccess() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -567,7 +582,8 @@ public void testDeleteTranslogFailure() throws Exception { remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -624,4 +640,117 @@ public void testMetadataConflict() throws InterruptedException { assertThrows(RuntimeException.class, translogTransferManager::readMetadata); } + + // tests for cases when ckp is stored as translog metadata. + public void testTransferSnapshotWithTranslogMetadata() throws Exception { + AtomicInteger fileTransferSucceeded = new AtomicInteger(); + AtomicInteger fileTransferFailed = new AtomicInteger(); + AtomicInteger translogTransferSucceeded = new AtomicInteger(); + AtomicInteger translogTransferFailed = new AtomicInteger(); + + isTranslogMetadataEnabled = true; + + doNothing().when(transferService) + .uploadBlob( + any(TransferFileSnapshot.class), + Mockito.eq(remoteBaseTransferPath.add(String.valueOf(primaryTerm))), + any(WritePriority.class) + ); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + Set transferFileSnapshots = (Set) invocationOnMock.getArguments()[0]; + transferFileSnapshots.forEach(transferFileSnapshot -> { + assertNotNull(transferFileSnapshot.getMetadataFileInputStream()); + listener.onResponse(transferFileSnapshot); + }); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ) { + @Override + public void onSuccess(TransferFileSnapshot fileSnapshot) { + fileTransferSucceeded.incrementAndGet(); + super.onSuccess(fileSnapshot); + } + + @Override + public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { + fileTransferFailed.incrementAndGet(); + super.onFailure(fileSnapshot, e); + } + + }; + + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), + fileTransferTracker, + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled + ); + + assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() 
{ + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) { + translogTransferSucceeded.incrementAndGet(); + } + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + translogTransferFailed.incrementAndGet(); + } + })); + assertEquals(2, fileTransferSucceeded.get()); + assertEquals(0, fileTransferFailed.get()); + assertEquals(1, translogTransferSucceeded.get()); + assertEquals(0, translogTransferFailed.get()); + assertEquals(2, fileTransferTracker.allUploaded().size()); + } + + public void testDownloadTranslogWithMetadata() throws IOException { + isTranslogMetadataEnabled = true; + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), + tracker, + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE, + isTranslogMetadataEnabled + ); + Path location = createTempDir(); + assertFalse(Files.exists(location.resolve("translog-23.tlog"))); + assertFalse(Files.exists(location.resolve("translog-23.ckp"))); + mockDownloadBlobWithMetadataResponse(); + translogTransferManager.downloadTranslog("12", "23", location); + verify(transferService, times(0)).downloadBlob(any(BlobPath.class), eq("translog-23.tlog")); + verify(transferService, times(0)).downloadBlob(any(BlobPath.class), eq("translog-23.ckp")); + verify(transferService, times(1)).downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog")); + assertTrue(Files.exists(location.resolve("translog-23.tlog"))); + assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStatsWithMetadata(); + } + + private void mockDownloadBlobWithMetadataResponse() throws IOException { + Map metadata = new HashMap<>(); + String ckpDataString = Base64.getEncoder().encodeToString(ckpBytes); + metadata.put(CHECKPOINT_FILE_DATA_KEY, ckpDataString); + when(transferService.downloadBlobWithMetadata(any(BlobPath.class), eq("translog-23.tlog"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new InputStreamWithMetadata(new ByteArrayInputStream(tlogBytes), metadata); + }); + } + + private void assertTlogCkpDownloadStatsWithMetadata() { + assertEquals(tlogBytes.length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + // Expect delay for both tlog and ckp file + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= delayForBlobDownload); + } } diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 69282c13a3f37..b6ca632d4b49a 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -314,7 +314,8 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m systemIndices, true, awarenessReplicaBalance, - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); transportCloseIndexAction = new TransportCloseIndexAction( diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 95a343f3b4025..3d31fa4df2455 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2164,7 
+2164,8 @@ public void onFailure(final Exception e) { systemIndices, false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), - DefaultRemoteStoreSettings.INSTANCE + DefaultRemoteStoreSettings.INSTANCE, + null ); actions.put( CreateIndexAction.INSTANCE, diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 5fa23a7b0827a..055bcd159efc3 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2659,6 +2659,7 @@ private static Settings buildRemoteStoreNodeAttributes( .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); } settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()); return settings.build(); } From 8d3e9a812e435aaaf10a1ddd8ecb9951552b1d97 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Fri, 24 May 2024 18:30:03 +0530 Subject: [PATCH 04/10] [Remote Store] Add segment transfer timeout dynamic setting (#13679) (#13793) * [Remote Store] Add segment transfer timeout dynamic setting Signed-off-by: Varun Bansal (cherry picked from commit b3049fb5ec860f63fdfe33c5b176869b1e4255e6) --- CHANGELOG.md | 1 + .../RemoteStoreRefreshListenerIT.java | 1 - .../common/settings/ClusterSettings.java | 1 + .../opensearch/index/shard/IndexShard.java | 3 +- .../shard/RemoteStoreRefreshListener.java | 14 ++- .../shard/SegmentUploadFailedException.java | 28 ++++++ .../indices/RemoteStoreSettings.java | 25 +++++ .../RemoteStoreRefreshListenerTests.java | 92 +++++++++++++++++-- 8 files changed, 153 insertions(+), 12 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java diff --git a/CHANGELOG.md b/CHANGELOG.md index d52efd755b892..bf1d91f0c6bcc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478)) - Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293)) - [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) +- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679)) ### Dependencies - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index 65016c4976157..7ae08bf968ade 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -26,7 +26,6 @@ import static 
org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; -import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 72a7df6ecc189..47a3371baf6c5 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -739,6 +739,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index fbec820882a51..96e9473663385 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3985,7 +3985,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro new RemoteStoreRefreshListener( this, this.checkpointPublisher, - remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) + remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()), + remoteStoreSettings ) ); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 36c9e9e663212..243af4ec248c3 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -33,6 +33,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.threadpool.ThreadPool; @@ -45,6 +46,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -89,11 +91,13 @@ public final class RemoteStoreRefreshListener extends ReleasableRetryableRefresh private volatile long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; + private final RemoteStoreSettings remoteStoreSettings; public 
RemoteStoreRefreshListener( IndexShard indexShard, SegmentReplicationCheckpointPublisher checkpointPublisher, - RemoteSegmentTransferTracker segmentTracker + RemoteSegmentTransferTracker segmentTracker, + RemoteStoreSettings remoteStoreSettings ) { super(indexShard.getThreadPool()); logger = Loggers.getLogger(getClass(), indexShard.shardId()); @@ -116,6 +120,7 @@ public RemoteStoreRefreshListener( this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; + this.remoteStoreSettings = remoteStoreSettings; } @Override @@ -286,7 +291,12 @@ public void onFailure(Exception e) { // Start the segments files upload uploadNewSegments(localSegmentsPostRefresh, localSegmentsSizeMap, segmentUploadsCompletedListener); - latch.await(); + if (latch.await( + remoteStoreSettings.getClusterRemoteSegmentTransferTimeout().millis(), + TimeUnit.MILLISECONDS + ) == false) { + throw new SegmentUploadFailedException("Timeout while waiting for remote segment transfer to complete"); + } } catch (EngineException e) { logger.warn("Exception while reading SegmentInfosSnapshot", e); } diff --git a/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java b/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java new file mode 100644 index 0000000000000..bbff399fb71ff --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/SegmentUploadFailedException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import java.io.IOException; + +/** + * Exception to be thrown when a segment upload fails. + * + * @opensearch.internal + */ +public class SegmentUploadFailedException extends IOException { + + /** + * Creates a new SegmentUploadFailedException. 
+ * + * @param message error message + */ + public SegmentUploadFailedException(String message) { + super(message); + } +} diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 836af2a14637f..074186f64a75d 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -117,9 +117,21 @@ public class RemoteStoreSettings { Property.NodeScope ); + /** + * Controls timeout value while uploading segment files to remote segment store + */ + public static final Setting CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.segment.transfer_timeout", + TimeValue.timeValueMinutes(30), + TimeValue.timeValueMinutes(10), + Property.NodeScope, + Property.Dynamic + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; private volatile TimeValue clusterRemoteTranslogTransferTimeout; + private volatile TimeValue clusterRemoteSegmentTransferTimeout; private volatile RemoteStoreEnums.PathType pathType; private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; @@ -156,6 +168,11 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { maxRemoteTranslogReaders = CLUSTER_REMOTE_MAX_TRANSLOG_READERS.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_REMOTE_MAX_TRANSLOG_READERS, this::setMaxRemoteTranslogReaders); + clusterRemoteSegmentTransferTimeout = CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, + this::setClusterRemoteSegmentTransferTimeout + ); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -178,10 +195,18 @@ public TimeValue getClusterRemoteTranslogTransferTimeout() { return clusterRemoteTranslogTransferTimeout; } + public TimeValue getClusterRemoteSegmentTransferTimeout() { + return clusterRemoteSegmentTransferTimeout; + } + private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) { this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout; } + private void setClusterRemoteSegmentTransferTimeout(TimeValue clusterRemoteSegmentTransferTimeout) { + this.clusterRemoteSegmentTransferTimeout = clusterRemoteSegmentTransferTimeout; + } + @ExperimentalApi public RemoteStoreEnums.PathType getPathType() { return pathType; diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 33f6c67b94b3d..e0ee58cbfdb22 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -23,6 +23,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.InternalEngineFactory; @@ -34,6 +35,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import 
org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; @@ -90,7 +92,12 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); - remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard, SegmentReplicationCheckpointPublisher.EMPTY, tracker); + remoteStoreRefreshListener = new RemoteStoreRefreshListener( + indexShard, + SegmentReplicationCheckpointPublisher.EMPTY, + tracker, + DefaultRemoteStoreSettings.INSTANCE + ); } private void indexDocs(int startDocId, int numberOfDocs) throws IOException { @@ -175,7 +182,12 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Since the thrown IOException is caught in the constructor, ctor should be invoked successfully. - new RemoteStoreRefreshListener(shard, SegmentReplicationCheckpointPublisher.EMPTY, mock(RemoteSegmentTransferTracker.class)); + new RemoteStoreRefreshListener( + shard, + SegmentReplicationCheckpointPublisher.EMPTY, + mock(RemoteSegmentTransferTracker.class), + DefaultRemoteStoreSettings.INSTANCE + ); // Validate that the stream of metadata file of remoteMetadataDirectory has been opened only once and the // listFilesByPrefixInLexicographicOrder has been called twice. @@ -370,6 +382,33 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { assertNoLagAndTotalUploadsFailed(segmentTracker, 1); } + public void testSegmentUploadTimeout() throws Exception { + // This covers the case where segment upload fails due to a timeout + int succeedOnAttempt = 1; + // We spy on IndexShard.isPrimaryStarted() to validate that the remote upload was attempted as many times as expected. + CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); + CountDownLatch successLatch = new CountDownLatch(2); + Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch, + 1, + new CountDownLatch(0), + true, + true + ); + assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); + assertBusy(() -> assertEquals(1, successLatch.getCount())); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); + assertBusy(() -> { + assertTrue(segmentTracker.getTotalUploadsFailed() > 1); + assertTrue(segmentTracker.getTotalUploadsSucceeded() < 2); + }); + // shut down the threadpool to avoid leaking threads + indexShard.getThreadPool().shutdownNow(); + } + /** * Tests retry flow after snapshot and metadata files have been uploaded to remote store in the failed attempt. * Snapshot and metadata files created in failed attempt should not break retry.
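For readers tracing the timeout change exercised by testSegmentUploadTimeout above: the production behavior reduces to a CountDownLatch awaited with the configured segment transfer timeout, where a false return from await is converted into an upload failure rather than an indefinite wait. The following standalone Java sketch illustrates only that pattern; the single-thread executor, the 10 ms timeout, and the local UploadTimedOutException are illustrative stand-ins (not OpenSearch classes), standing in for the uploader thread pool, the cluster.remote_store.segment.transfer_timeout setting, and SegmentUploadFailedException respectively.

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class UploadTimeoutSketch {

    // Local stand-in for the patch's SegmentUploadFailedException (an IOException subclass).
    static class UploadTimedOutException extends IOException {
        UploadTimedOutException(String message) {
            super(message);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        CountDownLatch latch = new CountDownLatch(1);
        long timeoutMillis = 10L; // assumed tiny timeout, standing in for the dynamic cluster setting

        // Simulate an upload that outlives the timeout, mirroring the mocked copyFrom
        // in testSegmentUploadTimeout (long sleep before counting the latch down).
        executor.submit(() -> {
            try {
                Thread.sleep(30_000L);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                latch.countDown();
            }
        });

        try {
            // Same shape as the listener change: a false return from await(timeout)
            // is surfaced as an upload failure instead of blocking the refresh forever.
            if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
                throw new UploadTimedOutException("Timeout while waiting for remote segment transfer to complete");
            }
            System.out.println("upload completed within the timeout");
        } catch (UploadTimedOutException e) {
            System.out.println("upload failed: " + e.getMessage());
        } finally {
            executor.shutdownNow(); // also interrupts the sleeping uploader
        }
    }
}

Run as-is, the sketch prints the failure branch, which is the same outcome the test asserts via the tracker's failed-upload counters.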
@@ -469,6 +508,7 @@ public void testRefreshFailedDueToPrimaryTermMisMatch() throws Exception { successLatch, checkpointPublishSucceedOnAttempt, reachedCheckpointPublishLatch, + false, false ); @@ -520,7 +560,8 @@ private Tuple mockIn successLatch, succeedCheckpointPublishOnAttempt, reachedCheckpointPublishLatch, - true + true, + false ); } @@ -530,7 +571,8 @@ private Tuple mockIn CountDownLatch successLatch, int succeedCheckpointPublishOnAttempt, CountDownLatch reachedCheckpointPublishLatch, - boolean mockPrimaryTerm + boolean mockPrimaryTerm, + boolean testUploadTimeout ) throws IOException { // Create index shard that we will be using to mock different methods in IndexShard for the unit test indexShard = newStartedShard( @@ -564,9 +606,22 @@ private Tuple mockIn // Mock (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) Store remoteStore = mock(Store.class); when(shard.remoteStore()).thenReturn(remoteStore); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory; + RemoteDirectory remoteDirectory = mock(RemoteDirectory.class); + + if (testUploadTimeout) { + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDirectory, + mock(RemoteDirectory.class), + mock(RemoteStoreLockManager.class), + indexShard.getThreadPool(), + indexShard.shardId + ); + } else { + remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore() + .directory()).getDelegate()).getDelegate(); + } + FilterDirectory remoteStoreFilterDirectory = new TestFilterDirectory(new TestFilterDirectory(remoteSegmentStoreDirectory)); when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); @@ -638,7 +693,28 @@ private Tuple mockIn RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings); - RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); + if (testUploadTimeout) { + when(remoteStoreSettings.getClusterRemoteSegmentTransferTimeout()).thenReturn(TimeValue.timeValueMillis(10)); + doAnswer(invocation -> { + ActionListener actionListener = invocation.getArgument(5); + indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + logger.warn("copyFrom thread interrupted during sleep"); + } + actionListener.onResponse(null); + }); + return true; + }).when(remoteDirectory).copyFrom(any(), any(), any(), any(), any(), any(ActionListener.class), any(Boolean.class)); + } + + RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener( + shard, + emptyCheckpointPublisher, + tracker, + remoteStoreSettings + ); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); } From 32fb7ab7fe4ce85609a3a8aecac3697eb6a08561 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 24 May 2024 09:28:08 -0400 Subject: [PATCH 05/10] Bump netty from 4.1.109.Final to 4.1.110.Final (#13802) (#13807) (cherry picked from commit 27003270adedebd43eedbd0186c0825f2c0e8613) 
Signed-off-by: github-actions[bot] Signed-off-by: Andriy Redko Co-authored-by: github-actions[bot] --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- modules/transport-netty4/build.gradle | 3 +++ .../licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.110.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.110.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 | 1 + plugins/repository-s3/build.gradle | 3 +++ .../repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.110.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.110.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.109.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.110.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.110.Final.jar.sha1 | 1 + plugins/transport-nio/build.gradle | 3 +++ .../transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 - 
.../transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.110.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.110.Final.jar.sha1 | 1 + plugins/transport-reactor-netty4/build.gradle | 3 +++ .../licenses/netty-buffer-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.109.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.110.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.109.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.110.Final.jar.sha1 | 1 + 92 files changed, 57 insertions(+), 44 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 delete mode 
100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 delete mode 100644 
plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index bf1d91f0c6bcc..35cec5437104a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665)) - Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752)) - Bump 
`ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756)) +- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802)) ### Changed - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index b1ad7c3f4c650..11f3440d5193a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -27,7 +27,7 @@ google_http_client = 1.44.1 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.109.Final +netty = 4.1.110.Final joda = 2.12.7 # project reactor diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index a0ae6619199d2..bd1c1502109cc 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -234,11 +234,14 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 deleted file mode 100644 index 76b51cdae3867..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d21d602ad7c639fa16b1d26559065d310a34c51 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..faaf70c858a6e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 @@ -0,0 +1 @@ +3d918a9ee057d995c362902b54634fc307132aac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 deleted file mode 100644 index 1bccee872152d..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16e0b2beb49318a549d3ba5d66d707bd5daa8c97 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 new file mode 100644 index 0000000000000..7affbc14fa93a --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1
deleted file mode 100644
index 3423fb94e8497..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..07730a5606ce2
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5172500557f8b..0000000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da63e54ee1ca69abf4206cb74fadef7f50850911
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..568c0aa2a2c03
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1
deleted file mode 100644
index cabe61b300523..0000000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9167863307b3c44cc12262e7b5512de3499b9c4a
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2d6050dd1e3a5
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1
deleted file mode 100644
index 14e21cc0cdb60..0000000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-055485ac976e27c8bb67ee111a8490c58f67b70c
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..c3ee8087a8b5d
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1
deleted file mode 100644
index 6b23d0883e31f..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-79e3b07d58ef03c7a860d48f932b720675aa8bd3
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..32c8fa2b876a2
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5afeb9627c9b5..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da7fe1e6943cbab8ee48df2beadc2c8304f347a2
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2c468962b1b64
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1
deleted file mode 100644
index b13a709f1c449..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..c4ca8f15e85c5
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+381c5bf8b7570c163fa7893a26d02b7ac36ff6eb
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1
deleted file mode 100644
index b83ad36222d07..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6bd4a54b69a81356393f6e4621bad40754f8a5a2
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..ebd1e0d52efb2
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5caf947d87a1b..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7f4f0c0dd54c578af2c613a0db7172bf7dca9c79
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..9f6e95ba38d2e
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+4d54c8d5b95b14756043efb59b8c3e62ec67aa43
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1
deleted file mode 100644
index e0f52ab04ea84..0000000000000
--- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a77224107f586a7f9e3dc5d12fc0d4d8f0c04803
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..f31396d94c2ec
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+b7fb401dd47c79e6b99f2319ac3b561c50c31c30
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1
deleted file mode 100644
index b42cdc2835eb0..0000000000000
--- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5f4d858234b557b73631a24e562bb89fc5399cad
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..18d122acd2c44
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+3e687cdc4ecdbbad07508a11b715bdf95fa20939
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5afeb9627c9b5..0000000000000
--- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da7fe1e6943cbab8ee48df2beadc2c8304f347a2
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2c468962b1b64
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1
deleted file mode 100644
index a874755cc29da..0000000000000
--- a/plugins/repository-hdfs/licenses/netty-all-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3ba1acc8ff088334f2ac5556663f8b737eb8b571
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..8f8d86e6065b2
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+db3f4d3ad3d16e26991a64d50b749ae09e0e0c8e
\ No newline at end of file
diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index 560d12d14395d..00decbe4fa9cd 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -577,11 +577,14 @@ thirdPartyAudit {
     'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields',
     'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields',
     'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode',
+    'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField',
+    'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField',
     'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess',
     'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess',
+    'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess',
   )
 }
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1
deleted file mode 100644
index 76b51cdae3867..0000000000000
--- a/plugins/repository-s3/licenses/netty-buffer-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d21d602ad7c639fa16b1d26559065d310a34c51
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..faaf70c858a6e
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1
deleted file mode 100644
index 1bccee872152d..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-16e0b2beb49318a549d3ba5d66d707bd5daa8c97
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..7affbc14fa93a
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1
deleted file mode 100644
index 3423fb94e8497..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..07730a5606ce2
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1
deleted file mode 100644
index b83ad36222d07..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6bd4a54b69a81356393f6e4621bad40754f8a5a2
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..ebd1e0d52efb2
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5172500557f8b..0000000000000
--- a/plugins/repository-s3/licenses/netty-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da63e54ee1ca69abf4206cb74fadef7f50850911
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..568c0aa2a2c03
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1
deleted file mode 100644
index cabe61b300523..0000000000000
--- a/plugins/repository-s3/licenses/netty-handler-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9167863307b3c44cc12262e7b5512de3499b9c4a
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2d6050dd1e3a5
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1
deleted file mode 100644
index 14e21cc0cdb60..0000000000000
--- a/plugins/repository-s3/licenses/netty-resolver-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-055485ac976e27c8bb67ee111a8490c58f67b70c
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..c3ee8087a8b5d
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1
deleted file mode 100644
index 6b23d0883e31f..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-79e3b07d58ef03c7a860d48f932b720675aa8bd3
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..32c8fa2b876a2
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1
deleted file mode 100644
index 83fc39246ef0a..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7307c8acbc9b331fce3496750a5112bdc726fd2a
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..408f3aa5d1339
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+3ca1cff0bf82bfd38e89f6946e54f24cbb3424a2
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5afeb9627c9b5..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da7fe1e6943cbab8ee48df2beadc2c8304f347a2
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2c468962b1b64
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle
index 8c0ee8ba718ac..ee557aa0efc79 100644
--- a/plugins/transport-nio/build.gradle
+++ b/plugins/transport-nio/build.gradle
@@ -173,11 +173,14 @@ thirdPartyAudit {
     'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields',
     'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields',
     'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode',
+    'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField',
+    'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField',
     'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess',
     'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess',
+    'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess',
 
     'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
     'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1',
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1
deleted file mode 100644
index 76b51cdae3867..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d21d602ad7c639fa16b1d26559065d310a34c51
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..faaf70c858a6e
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1
deleted file mode 100644
index 1bccee872152d..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-16e0b2beb49318a549d3ba5d66d707bd5daa8c97
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..7affbc14fa93a
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1
deleted file mode 100644
index 3423fb94e8497..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..07730a5606ce2
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5172500557f8b..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da63e54ee1ca69abf4206cb74fadef7f50850911
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..568c0aa2a2c03
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1
deleted file mode 100644
index cabe61b300523..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9167863307b3c44cc12262e7b5512de3499b9c4a
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2d6050dd1e3a5
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1
deleted file mode 100644
index 14e21cc0cdb60..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-055485ac976e27c8bb67ee111a8490c58f67b70c
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..c3ee8087a8b5d
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1
deleted file mode 100644
index 6b23d0883e31f..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-79e3b07d58ef03c7a860d48f932b720675aa8bd3
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..32c8fa2b876a2
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle
index 7d7eb330b4a55..1a94def3fdff1 100644
--- a/plugins/transport-reactor-netty4/build.gradle
+++ b/plugins/transport-reactor-netty4/build.gradle
@@ -249,11 +249,14 @@ thirdPartyAudit {
     'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields',
     'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields',
     'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode',
+    'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField',
+    'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField',
     'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField',
     'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess',
     'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess',
+    'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess',
 
     'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
     'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1',
     'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2',
diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1
deleted file mode 100644
index 76b51cdae3867..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d21d602ad7c639fa16b1d26559065d310a34c51
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..faaf70c858a6e
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1
deleted file mode 100644
index 1bccee872152d..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-16e0b2beb49318a549d3ba5d66d707bd5daa8c97
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..7affbc14fa93a
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1
deleted file mode 100644
index b13a709f1c449..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ee231baee2cc9f1300ecc0d9a1e8bb9b31db02fa
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..c4ca8f15e85c5
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+381c5bf8b7570c163fa7893a26d02b7ac36ff6eb
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1
deleted file mode 100644
index 3423fb94e8497..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6dca43cedc0b2dc6bf57bdc85fce6ffca3e6b72a
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..07730a5606ce2
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1
deleted file mode 100644
index b83ad36222d07..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6bd4a54b69a81356393f6e4621bad40754f8a5a2
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..ebd1e0d52efb2
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5172500557f8b..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da63e54ee1ca69abf4206cb74fadef7f50850911
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..568c0aa2a2c03
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1
deleted file mode 100644
index cabe61b300523..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9167863307b3c44cc12262e7b5512de3499b9c4a
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2d6050dd1e3a5
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1
deleted file mode 100644
index 14e21cc0cdb60..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-055485ac976e27c8bb67ee111a8490c58f67b70c
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..c3ee8087a8b5d
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1
deleted file mode 100644
index b42cdc2835eb0..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5f4d858234b557b73631a24e562bb89fc5399cad
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..18d122acd2c44
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+3e687cdc4ecdbbad07508a11b715bdf95fa20939
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1
deleted file mode 100644
index 6b23d0883e31f..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-79e3b07d58ef03c7a860d48f932b720675aa8bd3
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..32c8fa2b876a2
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
deleted file mode 100644
index 5afeb9627c9b5..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.109.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-da7fe1e6943cbab8ee48df2beadc2c8304f347a2
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
new file mode 100644
index 0000000000000..2c468962b1b64
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
@@ -0,0 +1 @@
+a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
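Each of the `*.jar.sha1` files churned above holds exactly one line: the lowercase hex SHA-1 digest of the corresponding jar, written without a trailing newline (hence the repeated `\ No newline at end of file` markers). As a rough sketch of where such a line comes from, offered as an illustration rather than anything from the patch itself, the following standalone program uses only JDK classes; the jar path is a hypothetical example:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    public final class Sha1License {
        // Computes the single-line content of a *.jar.sha1 license file.
        static String sha1Hex(Path jarPath) throws Exception {
            MessageDigest digest = MessageDigest.getInstance("SHA-1");
            byte[] hash = digest.digest(Files.readAllBytes(jarPath));
            StringBuilder hex = new StringBuilder(hash.length * 2);
            for (byte b : hash) {
                hex.append(Character.forDigit((b >> 4) & 0xf, 16));
                hex.append(Character.forDigit(b & 0xf, 16));
            }
            return hex.toString();
        }

        public static void main(String[] args) throws Exception {
            // Hypothetical local path; print() rather than println() matches
            // the missing trailing newline in the checked-in sha1 files.
            System.out.print(sha1Hex(Path.of("netty-codec-4.1.110.Final.jar")));
        }
    }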
From 4f4f7a680b2789f7b86332e617c35b27fcbb9fab Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Fri, 24 May 2024 16:54:09 -0400
Subject: [PATCH 06/10] Bump jackson from 2.17.0 to 2.17.1 (#13817) (#13823)

(cherry picked from commit 56d8dc65da163222ba9a3ba1b5c980422787b6f4)

Signed-off-by: Andriy Redko
Signed-off-by: github-actions[bot]
Co-authored-by: github-actions[bot]
---
 CHANGELOG.md | 1 +
 buildSrc/version.properties | 4 ++--
 client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 | 1 -
 client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 | 1 +
 .../upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 | 1 -
 .../upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 | 1 +
 .../upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 | 1 -
 .../upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 | 1 +
 libs/core/licenses/jackson-core-2.17.0.jar.sha1 | 1 -
 libs/core/licenses/jackson-core-2.17.1.jar.sha1 | 1 +
 libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 | 1 -
 libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-dataformat-smile-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-dataformat-smile-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 | 1 +
 .../ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 | 1 -
 .../ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 | 1 +
 .../ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 | 1 -
 .../ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 | 1 +
 .../crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 | 1 -
 .../crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 | 1 +
 plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 | 1 -
 plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-annotations-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-annotations-2.17.1.jar.sha1 | 1 +
 .../discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 | 1 -
 .../discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-annotations-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-annotations-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-databind-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-dataformat-xml-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-dataformat-xml-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 | 1 +
 .../licenses/jackson-annotations-2.17.0.jar.sha1 | 1 -
 .../licenses/jackson-annotations-2.17.1.jar.sha1 | 1 +
 .../repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 | 1 -
 .../repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 | 1 +
 44 files changed, 24 insertions(+), 23 deletions(-)
 delete mode 100644 client/sniffer/licenses/jackson-core-2.17.0.jar.sha1
 create mode 100644 client/sniffer/licenses/jackson-core-2.17.1.jar.sha1
 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1
 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1
 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1
 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1
 delete mode 100644 libs/core/licenses/jackson-core-2.17.0.jar.sha1
 create mode 100644 libs/core/licenses/jackson-core-2.17.1.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-core-2.17.0.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-core-2.17.1.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
 delete mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1
 create mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
 delete mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1
 create mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1
 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1
 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1
 delete mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1
 create mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1
 delete mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1
 create mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1
 delete mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1
 create mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1
 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1
 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1
 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1
 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 35cec5437104a..a273ad1494b72 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752))
 - Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756))
 - Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802))
+- Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817))
 
 ### Changed
 - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650))
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 11f3440d5193a..cd8934318038a 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -7,8 +7,8 @@ bundled_jdk = 21.0.3+9
 # optional dependencies
 spatial4j = 0.7
 jts = 1.15.0
-jackson = 2.17.0
-jackson_databind = 2.17.0
+jackson = 2.17.1
+jackson_databind = 2.17.1
 snakeyaml = 2.1
 icu4j = 70.1
 supercsv = 2.4.0
diff --git a/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1
deleted file mode 100644
index 9b906dbda1656..0000000000000
--- a/client/sniffer/licenses/jackson-core-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6e5058ef9720623c517252d17162f845306ff3a
\ No newline at end of file
diff --git a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..82dab5981e652
--- /dev/null
+++ b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1
@@ -0,0 +1 @@
+5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 66bf7ed6ecce8..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-880a742337010da4c851f843d8cac150e22dff9f
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..4ceead1b7ae4f
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1
deleted file mode 100644
index c0e4bb0c56849..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7cf1ac1b60301
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.17.0.jar.sha1 b/libs/core/licenses/jackson-core-2.17.0.jar.sha1
deleted file mode 100644
index 9b906dbda1656..0000000000000
--- a/libs/core/licenses/jackson-core-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6e5058ef9720623c517252d17162f845306ff3a
\ No newline at end of file
diff --git a/libs/core/licenses/jackson-core-2.17.1.jar.sha1 b/libs/core/licenses/jackson-core-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..82dab5981e652
--- /dev/null
+++ b/libs/core/licenses/jackson-core-2.17.1.jar.sha1
@@ -0,0 +1 @@
+5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1
deleted file mode 100644
index 9b906dbda1656..0000000000000
--- a/libs/x-content/licenses/jackson-core-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6e5058ef9720623c517252d17162f845306ff3a
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..82dab5981e652
--- /dev/null
+++ b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1
@@ -0,0 +1 @@
+5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1
deleted file mode 100644
index 382e20d3d31c1..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6833c8573452d583e4af650a7424d547606b2501
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..ff42ed1f92cfe
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
@@ -0,0 +1 @@
+ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1
deleted file mode 100644
index d117479166d17..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-smile-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f10183857607fde789490d33ea46372a2d2b0c72
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..47d19067cf2a6
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
@@ -0,0 +1 @@
+89683ac4f0a0c2c4f69ea56b90480ed40266dac8
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1
deleted file mode 100644
index 35242eed9b212..0000000000000
--- a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-57a963c6258c49febc11390082d8503f71bb15a9
\ No newline at end of file
diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7946e994c7104
--- /dev/null
+++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
@@ -0,0 +1 @@
+b4c7b8a9ea3f398116a75c146b982b22afebc4ee
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 66bf7ed6ecce8..0000000000000
--- a/modules/ingest-geoip/licenses/jackson-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-880a742337010da4c851f843d8cac150e22dff9f
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..4ceead1b7ae4f
--- /dev/null
+++ b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1
deleted file mode 100644
index c0e4bb0c56849..0000000000000
--- a/modules/ingest-geoip/licenses/jackson-databind-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7cf1ac1b60301
--- /dev/null
+++ b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 66bf7ed6ecce8..0000000000000
--- a/plugins/crypto-kms/licenses/jackson-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-880a742337010da4c851f843d8cac150e22dff9f
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..4ceead1b7ae4f
--- /dev/null
+++ b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1
deleted file mode 100644
index c0e4bb0c56849..0000000000000
--- a/plugins/crypto-kms/licenses/jackson-databind-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7cf1ac1b60301
--- /dev/null
+++ b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 66bf7ed6ecce8..0000000000000
--- a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-880a742337010da4c851f843d8cac150e22dff9f
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..4ceead1b7ae4f
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1
deleted file mode 100644
index c0e4bb0c56849..0000000000000
--- a/plugins/discovery-ec2/licenses/jackson-databind-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7cf1ac1b60301
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 66bf7ed6ecce8..0000000000000
--- a/plugins/repository-azure/licenses/jackson-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-880a742337010da4c851f843d8cac150e22dff9f
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..4ceead1b7ae4f
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1
deleted file mode 100644
index c0e4bb0c56849..0000000000000
--- a/plugins/repository-azure/licenses/jackson-databind-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7cf1ac1b60301
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1
deleted file mode 100644
index 9dea3dfc55691..0000000000000
--- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fbe3c274a39cef5538ca8688ac7e2ad0053a6ffa
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..3915ab2616beb
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1
@@ -0,0 +1 @@
+e6a168dba62aa63743b9e2b83f4e0f0dfdc143d3
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1
deleted file mode 100644
index fe8e51b8e0869..0000000000000
--- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3fab507bba9d477e52ed2302dc3ddbd23cbae339
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..db26ebbf738f7
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0969b0c3cb8c75d759e9a6c585c44c9b9f3a4f75
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 3954ac9c39af3..0000000000000
--- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e07032ce170277213ac4835169ca79fa0340c7b5
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..bb8ecfe34d295
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+f77e7bf0e64dfcf53bfdcf2764ad7ab92b78a4da
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1
deleted file mode 100644
index 66bf7ed6ecce8..0000000000000
--- a/plugins/repository-s3/licenses/jackson-annotations-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-880a742337010da4c851f843d8cac150e22dff9f
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..4ceead1b7ae4f
--- /dev/null
+++ b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1
@@ -0,0 +1 @@
+fca7ef6192c9ad05d07bc50da991bf937a84af3a
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1
deleted file mode 100644
index c0e4bb0c56849..0000000000000
--- a/plugins/repository-s3/licenses/jackson-databind-2.17.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7173e9e1d4bc6d7ca03bc4eeedcd548b8b580b34
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7cf1ac1b60301
--- /dev/null
+++ b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1
@@ -0,0 +1 @@
+0524dcbcccdde7d45a679dfc333e4763feb09079
\ No newline at end of file
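A quick way to confirm that this bump actually took effect at runtime, offered as a minimal sketch rather than anything from the patch: jackson-core publishes its own version through `PackageVersion.VERSION`, and `ObjectMapper.version()` reports the jackson-databind version the classpath resolved, so once `version.properties` drives the new jars in, both should print 2.17.1:

    import com.fasterxml.jackson.core.json.PackageVersion;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public final class JacksonVersionCheck {
        public static void main(String[] args) {
            // jackson-core version as resolved on the classpath.
            System.out.println("jackson-core:     " + PackageVersion.VERSION);
            // jackson-databind version, reported via the Versioned interface.
            System.out.println("jackson-databind: " + new ObjectMapper().version());
        }
    }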
- Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index cd8934318038a..96d4f2a39f66b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -31,8 +31,8 @@ netty = 4.1.110.Final joda = 2.12.7 # project reactor -reactor_netty = 1.1.17 -reactor = 3.5.15 +reactor_netty = 1.1.19 +reactor = 3.5.17 # client dependencies httpclient = 4.5.14 diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 deleted file mode 100644 index 3d631bc904f24..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..cbcbfcd87d682 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 @@ -0,0 +1 @@ +639e2c63ade6f2a49d7e501ca2264b74d240b448 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 deleted file mode 100644 index 9ceef6959744b..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..1eeedfc0926f5 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 @@ -0,0 +1 @@ +b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 deleted file mode 100644 index 3d631bc904f24..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..cbcbfcd87d682 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 @@ -0,0 +1 @@ +639e2c63ade6f2a49d7e501ca2264b74d240b448 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 deleted file mode 100644 index 9ceef6959744b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 new file mode 100644 index 0000000000000..1eeedfc0926f5 --- /dev/null +++ 
+++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1
@@ -0,0 +1 @@
+b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07
\ No newline at end of file
diff --git a/server/licenses/reactor-core-3.5.15.jar.sha1 b/server/licenses/reactor-core-3.5.15.jar.sha1
deleted file mode 100644
index 02df47ed58b9d..0000000000000
--- a/server/licenses/reactor-core-3.5.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4e07a24c671235a2a806e75e9b8ff23d7d1db3d4
\ No newline at end of file
diff --git a/server/licenses/reactor-core-3.5.17.jar.sha1 b/server/licenses/reactor-core-3.5.17.jar.sha1
new file mode 100644
index 0000000000000..6663356bab047
--- /dev/null
+++ b/server/licenses/reactor-core-3.5.17.jar.sha1
@@ -0,0 +1 @@
+2cf9b080e3a2d8a5a39948260db5fd1dae54c3ac
\ No newline at end of file

From 0fc94ca3aa9b12558d898ff05b479b360f71ae0f Mon Sep 17 00:00:00 2001
From: Neetika Singhal
Date: Sun, 26 May 2024 18:13:53 -0700
Subject: [PATCH 08/10] Refactor writable warm feature flag to include tiering
 (#13799)

Signed-off-by: Neetika Singhal
---
 .../common/settings/FeatureFlagSettings.java     |  6 +++---
 .../org/opensearch/common/util/FeatureFlags.java | 12 ++++--------
 2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
index 0e1b353fb41b1..7a364de1c5dc6 100644
--- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
@@ -34,8 +34,8 @@ protected FeatureFlagSettings(
             FeatureFlags.IDENTITY_SETTING,
             FeatureFlags.TELEMETRY_SETTING,
             FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING,
-            FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING,
-            FeatureFlags.PLUGGABLE_CACHE_SETTING,
-            FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING
+            FeatureFlags.TIERED_REMOTE_INDEX_SETTING,
+            FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING,
+            FeatureFlags.PLUGGABLE_CACHE_SETTING
         );
     }
diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
index a72583607ede0..62cfbd861d4d9 100644
--- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
+++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
@@ -56,10 +56,10 @@ public class FeatureFlags {
     public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled";
 
     /**
-     * Gates the functionality of writeable remote index
+     * Gates the functionality of remote index having the capability to move across different tiers
      * Once the feature is ready for release, this feature flag can be removed.
      */
-    public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled";
+    public static final String TIERED_REMOTE_INDEX = "opensearch.experimental.feature.tiered_remote_index.enabled";
 
     /**
      * Gates the functionality of pluggable cache.
@@ -85,11 +85,7 @@ public class FeatureFlags {
         Property.NodeScope
     );
 
-    public static final Setting<Boolean> WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting(
-        WRITEABLE_REMOTE_INDEX,
-        false,
-        Property.NodeScope
-    );
+    public static final Setting<Boolean> TIERED_REMOTE_INDEX_SETTING = Setting.boolSetting(TIERED_REMOTE_INDEX, false, Property.NodeScope);
 
     public static final Setting<Boolean> PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope);
 
@@ -99,7 +95,7 @@ public class FeatureFlags {
         IDENTITY_SETTING,
         TELEMETRY_SETTING,
         DATETIME_FORMATTER_CACHING_SETTING,
-        WRITEABLE_REMOTE_INDEX_SETTING,
+        TIERED_REMOTE_INDEX_SETTING,
         PLUGGABLE_CACHE_SETTING
     );
     /**
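Not part of the patch itself: a minimal sketch of how a caller would consume the renamed flag, assuming the FeatureFlags.isEnabled(String) helper that the class's other experimental flags already go through. The TieringGuardExample class name is illustrative only.

    // Hypothetical caller (not in this patch series): gate tiering-specific
    // logic behind the experimental flag. FeatureFlags.isEnabled(String) is
    // assumed to exist, matching how the other flags in this class are read.
    import org.opensearch.common.util.FeatureFlags;

    public final class TieringGuardExample {
        public static void maybeTier(Runnable tieringLogic) {
            if (FeatureFlags.isEnabled(FeatureFlags.TIERED_REMOTE_INDEX)) {
                // Runs only when the node opts in to tiered remote indices.
                tieringLogic.run();
            }
        }
    }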

From 6f9ee95148f4bbd285a4d4ad8c591c2db788b4e7 Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Mon, 27 May 2024 12:13:19 -0400
Subject: [PATCH 09/10] Bump commons-cli:commons-cli from 1.7.0 to 1.8.0 in
 /plugins/repository-hdfs (#13840) (#13843)

* Bump commons-cli:commons-cli in /plugins/repository-hdfs

Bumps commons-cli:commons-cli from 1.7.0 to 1.8.0.

---
updated-dependencies:
- dependency-name: commons-cli:commons-cli
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

* Updating SHAs

* Update changelog

---------

(cherry picked from commit 09c2c7d798bf12ee8e422720cc56d0bb86cea393)

Signed-off-by: dependabot[bot]
Signed-off-by: github-actions[bot]
Co-authored-by: github-actions[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md                                                | 1 +
 plugins/repository-hdfs/build.gradle                        | 2 +-
 plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 | 1 -
 plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15bb78244a41b..c8c77bd846640 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817))
 - Bump `reactor` from 3.5.15 to 3.5.17 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
 - Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
+- Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840))
 
 ### Changed
 - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650))
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index d3e4c6c9e3d03..3ab8f48cd5b9f 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -70,7 +70,7 @@ dependencies {
   api 'com.google.code.gson:gson:2.11.0'
   runtimeOnly "com.google.guava:guava:${versions.guava}"
   api "commons-logging:commons-logging:${versions.commonslogging}"
-  api 'commons-cli:commons-cli:1.7.0'
+  api 'commons-cli:commons-cli:1.8.0'
   api "commons-codec:commons-codec:${versions.commonscodec}"
   api 'commons-collections:commons-collections:3.2.2'
   api "org.apache.commons:commons-compress:${versions.commonscompress}"
diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1
deleted file mode 100644
index 759bc9275d346..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-cli-1.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6504b3f17e8bc5adc6b6c8deecc90144d0154075
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1
new file mode 100644
index 0000000000000..65102052409ea
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1
@@ -0,0 +1 @@
+41a4bff12057eecb6daaf9c7f36c237815be3da1
\ No newline at end of file
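Each bump in this series pairs the version change with a regenerated one-line *.jar.sha1 license file ("Updating SHAs" in the commit message). Not part of the patches themselves: a minimal JDK-only sketch of producing such a file, which holds the bare hex SHA-1 with no trailing newline (hence the "\ No newline at end of file" markers in the diffs). The UpdateShaExample class name is illustrative only.

    // Hypothetical helper (not in this patch series): writes the bare hex
    // SHA-1 of a jar to <jar>.sha1 without a trailing newline, matching the
    // format of the checked-in license files.
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    public final class UpdateShaExample {
        public static void main(String[] args) throws Exception {
            Path jar = Path.of(args[0]); // e.g. commons-cli-1.8.0.jar
            byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(jar));
            StringBuilder hex = new StringBuilder(40);
            for (byte b : digest) {
                hex.append(String.format("%02x", b)); // lower-case hex, two chars per byte
            }
            Files.writeString(Path.of(jar + ".sha1"), hex.toString());
        }
    }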
"org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 deleted file mode 100644 index f34274d593697..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6198ac997b3f234f2b5393fa415f78fac2e06510 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 new file mode 100644 index 0000000000000..eaab556163e5c --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.2.1.jar.sha1 @@ -0,0 +1 @@ +e16ddf17fe181c202b097e0dcc0ee2fed91cb7da \ No newline at end of file