diff --git a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java
index 9e9d94c8e8fc0..4c8d8aab4532b 100644
--- a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java
+++ b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java
@@ -113,7 +113,7 @@ public URLRepository(
ClusterService clusterService,
RecoverySettings recoverySettings
) {
- super(metadata, false, namedXContentRegistry, clusterService, recoverySettings);
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings);
if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) {
throw new RepositoryException(metadata.name(), "missing url");
diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java
index 65852c4fc5bd0..22535d881ccaf 100644
--- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java
+++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java
@@ -114,14 +114,7 @@ public AzureRepository(
final ClusterService clusterService,
final RecoverySettings recoverySettings
) {
- super(
- metadata,
- COMPRESS_SETTING.get(metadata.settings()),
- namedXContentRegistry,
- clusterService,
- recoverySettings,
- buildLocation(metadata)
- );
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata));
this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings());
this.storageService = storageService;
diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java
index c42cd1802f6e9..faf05469d01e3 100644
--- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java
+++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java
@@ -92,14 +92,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository {
final ClusterService clusterService,
final RecoverySettings recoverySettings
) {
- super(
- metadata,
- getSetting(COMPRESS_SETTING, metadata),
- namedXContentRegistry,
- clusterService,
- recoverySettings,
- buildLocation(metadata)
- );
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata));
this.storageService = storageService;
String basePath = BASE_PATH.get(metadata.settings());
diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java
index b28d28d76cfde..f0ffec5713c1d 100644
--- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java
+++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java
@@ -83,7 +83,7 @@ public HdfsRepository(
final ClusterService clusterService,
final RecoverySettings recoverySettings
) {
- super(metadata, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, clusterService, recoverySettings);
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings);
this.environment = environment;
this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null);
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
index fcd8382fa8318..1051d5941a57a 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java
@@ -68,7 +68,7 @@ class S3BlobStore implements BlobStore {
private final StorageClass storageClass;
- private RepositoryMetadata repositoryMetadata;
+ private volatile RepositoryMetadata repositoryMetadata;
private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher();
@@ -105,20 +105,11 @@ class S3BlobStore implements BlobStore {
this.priorityExecutorBuilder = priorityExecutorBuilder;
}
- @Override
- public boolean isReloadable() {
- return true;
- }
-
@Override
public void reload(RepositoryMetadata repositoryMetadata) {
this.repositoryMetadata = repositoryMetadata;
}
- public boolean isMultipartUploadEnabled() {
- return multipartUploadEnabled;
- }
-
@Override
public String toString() {
return bucket;
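
The volatile qualifier matters here because reload(RepositoryMetadata) can run on the cluster applier thread while snapshot threads read the field concurrently; without it, readers could keep seeing a stale reference. A stripped-down sketch of that publication pattern, with String standing in for RepositoryMetadata:

    // Sketch only: String stands in for RepositoryMetadata.
    class ReloadableStore {
        private volatile String repositoryMetadata;

        ReloadableStore(String repositoryMetadata) {
            this.repositoryMetadata = repositoryMetadata;
        }

        void reload(String newMetadata) {
            this.repositoryMetadata = newMetadata; // write is published to all reader threads
        }

        String current() {
            return repositoryMetadata;
        }
    }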
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java
index afdc3c1665958..d8f2df5b83e9a 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java
@@ -44,6 +44,7 @@
import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.common.settings.SecureSetting;
import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
import org.opensearch.core.action.ActionListener;
import org.opensearch.core.common.Strings;
import org.opensearch.core.common.settings.SecureString;
@@ -78,7 +79,6 @@
*
* {@code concurrent_streams} Number of concurrent read/write streams (per repository on each node). Defaults to 5.
* {@code chunk_size}
* Large files can be divided into chunks. This parameter specifies the chunk size. Defaults to not chunked.
- * {@code compress}If set to true metadata files will be stored compressed. Defaults to false.
*
*/
class S3Repository extends MeteredBlobStoreRepository {
@@ -204,22 +204,19 @@ class S3Repository extends MeteredBlobStoreRepository {
private final S3Service service;
- private String bucket;
+ private volatile String bucket;
- private ByteSizeValue bufferSize;
+ private volatile ByteSizeValue bufferSize;
- private ByteSizeValue chunkSize;
+ private volatile ByteSizeValue chunkSize;
- private BlobPath basePath;
+ private volatile BlobPath basePath;
- private boolean serverSideEncryption;
+ private volatile boolean serverSideEncryption;
- private String storageClass;
-
- private String cannedACL;
-
- private RepositoryMetadata repositoryMetadata;
+ private volatile String storageClass;
+ private volatile String cannedACL;
private final AsyncTransferManager asyncUploadUtils;
private final S3AsyncService s3AsyncService;
private final boolean multipartUploadEnabled;
@@ -227,6 +224,7 @@ class S3Repository extends MeteredBlobStoreRepository {
private final AsyncExecutorContainer normalExecutorBuilder;
private final Path pluginConfigPath;
+ // Used by test classes
S3Repository(
final RepositoryMetadata metadata,
final NamedXContentRegistry namedXContentRegistry,
@@ -270,14 +268,7 @@ class S3Repository extends MeteredBlobStoreRepository {
final boolean multipartUploadEnabled,
Path pluginConfigPath
) {
- super(
- metadata,
- COMPRESS_SETTING.get(metadata.settings()),
- namedXContentRegistry,
- clusterService,
- recoverySettings,
- buildLocation(metadata)
- );
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata));
this.service = service;
this.s3AsyncService = s3AsyncService;
this.multipartUploadEnabled = multipartUploadEnabled;
@@ -286,7 +277,7 @@ class S3Repository extends MeteredBlobStoreRepository {
this.priorityExecutorBuilder = priorityExecutorBuilder;
this.normalExecutorBuilder = normalExecutorBuilder;
- readRepositoryMetadata(metadata);
+ readRepositoryMetadata();
}
private static Map<String, String> buildLocation(RepositoryMetadata metadata) {
@@ -341,14 +332,14 @@ protected S3BlobStore createBlobStore() {
bufferSize,
cannedACL,
storageClass,
- repositoryMetadata,
+ metadata,
asyncUploadUtils,
priorityExecutorBuilder,
normalExecutorBuilder
);
}
- // only use for testing
+ // only used for testing (S3RepositoryTests)
@Override
protected BlobStore getBlobStore() {
return super.getBlobStore();
@@ -368,51 +359,27 @@ public boolean isReloadable() {
public void reload(RepositoryMetadata newRepositoryMetadata) {
// Reload configs for S3Repository
super.reload(newRepositoryMetadata);
- repositoryMetadata = newRepositoryMetadata;
- readRepositoryMetadata(repositoryMetadata);
+ readRepositoryMetadata();
// Reload configs for S3RepositoryPlugin
- final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(repositoryMetadata.settings(), pluginConfigPath);
+ final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(metadata.settings(), pluginConfigPath);
service.refreshAndClearCache(clientsSettings);
s3AsyncService.refreshAndClearCache(clientsSettings);
// Reload configs for S3BlobStore
BlobStore blobStore = getBlobStore();
- blobStore.reload(repositoryMetadata);
+ blobStore.reload(metadata);
}
/**
* Reloads the values derived from the Repository Metadata
- *
- * @param repositoryMetadata RepositoryMetadata instance to derive the values from
*/
- private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) {
- this.repositoryMetadata = metadata;
+ private void readRepositoryMetadata() {
+ validateRepositoryMetadata();
- // Parse and validate the user's S3 Storage Class setting
this.bucket = BUCKET_SETTING.get(metadata.settings());
- if (bucket == null) {
- throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
- }
-
this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
-
- // We make sure that chunkSize is bigger or equal than/to bufferSize
- if (this.chunkSize.getBytes() < bufferSize.getBytes()) {
- throw new RepositoryException(
- metadata.name(),
- CHUNK_SIZE_SETTING.getKey()
- + " ("
- + this.chunkSize
- + ") can't be lower than "
- + BUFFER_SIZE_SETTING.getKey()
- + " ("
- + bufferSize
- + ")."
- );
- }
-
final String basePath = BASE_PATH_SETTING.get(metadata.settings());
if (Strings.hasLength(basePath)) {
this.basePath = new BlobPath().add(basePath);
@@ -421,10 +388,8 @@ private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) {
}
this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
-
this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
-
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
// provided repository settings
deprecationLogger.deprecate(
@@ -445,6 +410,29 @@ private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) {
);
}
+ private void validateRepositoryMetadata() {
+ // Validate the user's S3 repository settings before any derived state is updated
+ Settings settings = metadata.settings();
+ if (BUCKET_SETTING.get(settings) == null) {
+ throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
+ }
+
+ // Make sure that chunkSize is greater than or equal to bufferSize
+ if (CHUNK_SIZE_SETTING.get(settings).getBytes() < BUFFER_SIZE_SETTING.get(settings).getBytes()) {
+ throw new RepositoryException(
+ metadata.name(),
+ CHUNK_SIZE_SETTING.getKey()
+ + " ("
+ + CHUNK_SIZE_SETTING.get(settings)
+ + ") can't be lower than "
+ + BUFFER_SIZE_SETTING.getKey()
+ + " ("
+ + BUFFER_SIZE_SETTING.get(settings)
+ + ")."
+ );
+ }
+ }
+
@Override
protected ByteSizeValue chunkSize() {
return chunkSize;
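
The split into validateRepositoryMetadata() and readRepositoryMetadata() follows a validate-then-assign shape: every check runs before any volatile field is overwritten, so a rejected reload leaves the previous working configuration intact. A self-contained sketch of the idea (field names and values are illustrative, not the real settings):

    // Illustrative sketch; the real checks live in validateRepositoryMetadata().
    class RepoConfig {
        private volatile long chunkSizeBytes = 5L * 1024 * 1024 * 1024;
        private volatile long bufferSizeBytes = 100L * 1024 * 1024;

        void reload(long newChunkSize, long newBufferSize) {
            validate(newChunkSize, newBufferSize); // throws before any state changes
            this.chunkSizeBytes = newChunkSize;
            this.bufferSizeBytes = newBufferSize;
        }

        private static void validate(long chunkSize, long bufferSize) {
            if (chunkSize < bufferSize) {
                throw new IllegalArgumentException(
                    "chunk_size (" + chunkSize + ") can't be lower than buffer_size (" + bufferSize + ")");
            }
        }
    }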
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
index b13672b4179f8..b1b3e19eac275 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
@@ -90,6 +90,7 @@
import java.security.SecureRandom;
import java.time.Duration;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import static java.util.Collections.emptyMap;
@@ -100,7 +101,7 @@ class S3Service implements Closeable {
private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com";
- private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = emptyMap();
+ private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = new ConcurrentHashMap<>();
/**
* Client settings calculated from static configuration and settings in the keystore.
@@ -111,7 +112,7 @@ class S3Service implements Closeable {
* Client settings derived from those in {@link #staticClientSettings} by combining them with settings
* in the {@link RepositoryMetadata}.
*/
- private volatile Map<Settings, S3ClientSettings> derivedClientSettings = emptyMap();
+ private volatile Map<Settings, S3ClientSettings> derivedClientSettings = new ConcurrentHashMap<>();
S3Service(final Path configPath) {
staticClientSettings = MapBuilder.newMapBuilder()
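
Switching the caches from an immutable map to a ConcurrentHashMap lets a refresh repopulate entries while other threads keep resolving clients. A simplified, self-contained sketch of that access pattern, with Object standing in for the cached client reference:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Sketch only: Object stands in for the cached S3 client reference.
    class ClientCache {
        private final ConcurrentMap<String, Object> clients = new ConcurrentHashMap<>();

        Object client(String settingsKey) {
            // safe under concurrent refresh; at most one value is built per key
            return clients.computeIfAbsent(settingsKey, key -> new Object());
        }

        void refreshAndClearCache() {
            clients.clear(); // callers holding an existing reference are unaffected
        }
    }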
diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java
index d75d5f710c3df..2ee3e9557b354 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java
@@ -56,14 +56,6 @@ default Map<String, Long> stats() {
return Collections.emptyMap();
}
- /**
- * Checks if the blob store can be reloaded inplace or not
- * @return true if the blob store can be reloaded inplace, false otherwise
- */
- default boolean isReloadable() {
- return false;
- }
-
/**
* Reload the blob store inplace
*/
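
With isReloadable() removed from BlobStore, reload(...) remains the single hook and the reloadability decision moves to the repository. A reduced sketch of the resulting interface shape, assuming a no-op default and using placeholder names (Store, S3LikeStore) and String in place of RepositoryMetadata:

    interface Store {
        default void reload(String repositoryMetadata) {
            // no-op by default; stores that support in-place reload override this
        }
    }

    class S3LikeStore implements Store {
        private volatile String repositoryMetadata = "initial";

        @Override
        public void reload(String repositoryMetadata) {
            this.repositoryMetadata = repositoryMetadata;
        }
    }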
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java
index b867f16e1f495..50c15f5144c28 100644
--- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java
+++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java
@@ -456,10 +456,11 @@ public void applyClusterState(ClusterChangedEvent event) {
if (previousMetadata.type().equals(repositoryMetadata.type()) == false
|| previousMetadata.settings().equals(repositoryMetadata.settings()) == false) {
// Previous version is different from the version in settings
- logger.debug("updating repository [{}]", repositoryMetadata.name());
if (repository.isSystemRepository() && repository.isReloadable()) {
+ logger.debug("updating repository [{}] in-place", repositoryMetadata.name());
repository.reload(repositoryMetadata);
} else {
+ logger.debug("creating repository [{}] again", repositoryMetadata.name());
closeRepository(repository);
archiveRepositoryStats(repository, state.version());
repository = null;
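
The branch in applyClusterState now distinguishes the two update paths explicitly: a system repository that supports in-place reload is updated without being closed, while anything else is closed and recreated from the new metadata. A compact restatement of that decision, with Repo as a stand-in for Repository and String for RepositoryMetadata:

    import java.util.function.Function;

    // Sketch only; not the RepositoriesService API.
    interface Repo {
        boolean isSystemRepository();
        boolean isReloadable();
        void reload(String newMetadata);
        void close();
    }

    class Applier {
        Repo applyChanged(Repo repo, String newMetadata, Function<String, Repo> factory) {
            if (repo.isSystemRepository() && repo.isReloadable()) {
                repo.reload(newMetadata);   // in-place update, existing handles stay valid
                return repo;
            }
            repo.close();                   // otherwise recreate from the new metadata
            return factory.apply(newMetadata);
        }
    }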
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
index 4fb8bb179ad70..263afee44177b 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java
@@ -296,21 +296,21 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
Setting.Property.NodeScope
);
- protected boolean supportURLRepo;
+ protected volatile boolean supportURLRepo;
- private int maxShardBlobDeleteBatch;
+ private volatile int maxShardBlobDeleteBatch;
- private Compressor compressor;
+ private volatile Compressor compressor;
- private boolean cacheRepositoryData;
+ private volatile boolean cacheRepositoryData;
- private RateLimiter snapshotRateLimiter;
+ private volatile RateLimiter snapshotRateLimiter;
- private RateLimiter restoreRateLimiter;
+ private volatile RateLimiter restoreRateLimiter;
- private RateLimiter remoteUploadRateLimiter;
+ private volatile RateLimiter remoteUploadRateLimiter;
- private RateLimiter remoteDownloadRateLimiter;
+ private volatile RateLimiter remoteDownloadRateLimiter;
private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric();
@@ -355,9 +355,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobStoreIndexShardSnapshots::fromXContent
);
- private boolean readOnly;
+ private volatile boolean readOnly;
- private boolean isSystemRepository;
+ private final boolean isSystemRepository;
private final Object lock = new Object();
@@ -399,7 +399,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
/**
* IO buffer size hint for reading and writing to the underlying blob store.
*/
- protected int bufferSize;
+ protected volatile int bufferSize;
/**
* Constructs new BlobStoreRepository
@@ -408,7 +408,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
*/
protected BlobStoreRepository(
final RepositoryMetadata repositoryMetadata,
- final boolean compress,
final NamedXContentRegistry namedXContentRegistry,
final ClusterService clusterService,
final RecoverySettings recoverySettings
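
The field changes in BlobStoreRepository follow one rule: identity that is fixed at construction (isSystemRepository) becomes final, while every setting that can change on reload becomes volatile so readers observe the update. A minimal sketch of that split, with illustrative field names:

    class RepoState {
        private final boolean systemRepository;  // fixed for the lifetime of the repository
        private volatile boolean readOnly;       // may change when settings are reloaded

        RepoState(boolean systemRepository, boolean readOnly) {
            this.systemRepository = systemRepository;
            this.readOnly = readOnly;
        }

        void reload(boolean newReadOnly) {
            this.readOnly = newReadOnly;
        }

        boolean isSystemRepository() {
            return systemRepository;
        }

        boolean isReadOnly() {
            return readOnly;
        }
    }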
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java
index d47bf147a740a..d4921f4e6d2e7 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java
@@ -53,13 +53,12 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository {
public MeteredBlobStoreRepository(
RepositoryMetadata metadata,
- boolean compress,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
RecoverySettings recoverySettings,
Map<String, String> location
) {
- super(metadata, compress, namedXContentRegistry, clusterService, recoverySettings);
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings);
ThreadPool threadPool = clusterService.getClusterApplierService().threadPool();
this.repositoryInfo = new RepositoryInfo(
UUIDs.randomBase64UUID(),
diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java
index dcd528dc15f01..a866e60d1eea0 100644
--- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java
@@ -61,7 +61,6 @@
* {@code concurrent_streams} Number of concurrent read/write streams (per repository on each node). Defaults to 5.
* {@code chunk_size} Large files can be divided into chunks. This parameter specifies the chunk size.
* Defaults to not chunked.
- * {@code compress}If set to true metadata files will be stored compressed. Defaults to false.
*
*
* @opensearch.internal
@@ -117,8 +116,27 @@ public FsRepository(
ClusterService clusterService,
RecoverySettings recoverySettings
) {
- super(metadata, calculateCompress(metadata, environment), namedXContentRegistry, clusterService, recoverySettings);
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings);
this.environment = environment;
+ validateLocation();
+ readMetadata();
+ }
+
+ protected void readMetadata() {
+ if (CHUNK_SIZE_SETTING.exists(metadata.settings())) {
+ this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
+ } else {
+ this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings());
+ }
+ final String basePath = BASE_PATH_SETTING.get(metadata.settings());
+ if (Strings.hasLength(basePath)) {
+ this.basePath = new BlobPath().add(basePath);
+ } else {
+ this.basePath = BlobPath.cleanPath();
+ }
+ }
+
+ protected void validateLocation() {
String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
if (location.isEmpty()) {
logger.warn(
@@ -151,24 +169,6 @@ public FsRepository(
);
}
}
-
- if (CHUNK_SIZE_SETTING.exists(metadata.settings())) {
- this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
- } else {
- this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings());
- }
- final String basePath = BASE_PATH_SETTING.get(metadata.settings());
- if (Strings.hasLength(basePath)) {
- this.basePath = new BlobPath().add(basePath);
- } else {
- this.basePath = BlobPath.cleanPath();
- }
- }
-
- private static boolean calculateCompress(RepositoryMetadata metadata, Environment environment) {
- return COMPRESS_SETTING.exists(metadata.settings())
- ? COMPRESS_SETTING.get(metadata.settings())
- : REPOSITORIES_COMPRESS_SETTING.get(environment.settings());
}
@Override
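
Extracting validateLocation() and readMetadata() turns construction and reload into the same two steps, which is what lets the ReloadableFsRepository diff that follows drop its duplicated copy of this logic. A self-contained sketch of the shape (FsLikeRepo is a placeholder, not FsRepository, and location handling is reduced to a single string):

    class FsLikeRepo {
        protected volatile String basePath;

        FsLikeRepo(String location) {
            validateLocation(location);
            readMetadata(location);
        }

        protected void validateLocation(String location) {
            if (location.isEmpty()) {
                throw new IllegalArgumentException("missing location");
            }
        }

        protected void readMetadata(String location) {
            this.basePath = location;
        }
    }

    class ReloadableFsLikeRepo extends FsLikeRepo {
        ReloadableFsLikeRepo(String location) {
            super(location);
        }

        void reload(String newLocation) {
            validateLocation(newLocation); // same steps as construction, no duplication
            readMetadata(newLocation);
        }
    }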
diff --git a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java
index a40864413c226..87151d80129cb 100644
--- a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java
+++ b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java
@@ -12,14 +12,9 @@
import org.apache.logging.log4j.Logger;
import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.blobstore.BlobPath;
-import org.opensearch.core.common.Strings;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.env.Environment;
import org.opensearch.indices.recovery.RecoverySettings;
-import org.opensearch.repositories.RepositoryException;
-
-import java.nio.file.Path;
/**
* Extension of {@link FsRepository} that can be reloaded inplace
@@ -30,13 +25,7 @@ public class ReloadableFsRepository extends FsRepository {
private static final Logger logger = LogManager.getLogger(ReloadableFsRepository.class);
/**
- * Constructs a shared file system repository.
- *
- * @param metadata
- * @param environment
- * @param namedXContentRegistry
- * @param clusterService
- * @param recoverySettings
+ * Constructs a shared file system repository that is reloadable in-place.
*/
public ReloadableFsRepository(
RepositoryMetadata metadata,
@@ -56,53 +45,7 @@ public boolean isReloadable() {
@Override
public void reload(RepositoryMetadata repositoryMetadata) {
super.reload(repositoryMetadata);
- metadata = repositoryMetadata;
-
- // TODO - deduplicate the below block
- String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
- if (location.isEmpty()) {
- logger.warn(
- "the repository location is missing, it should point to a shared file system location"
- + " that is available on all cluster-manager and data nodes"
- );
- throw new RepositoryException(metadata.name(), "missing location");
- }
- Path locationFile = environment.resolveRepoFile(location);
- if (locationFile == null) {
- if (environment.repoFiles().length > 0) {
- logger.warn(
- "The specified location [{}] doesn't start with any " + "repository paths specified by the path.repo setting: [{}] ",
- location,
- environment.repoFiles()
- );
- throw new RepositoryException(
- metadata.name(),
- "location [" + location + "] doesn't match any of the locations specified by path.repo"
- );
- } else {
- logger.warn(
- "The specified location [{}] should start with a repository path specified by"
- + " the path.repo setting, but the path.repo setting was not set on this node",
- location
- );
- throw new RepositoryException(
- metadata.name(),
- "location [" + location + "] doesn't match any of the locations specified by path.repo because this setting is empty"
- );
- }
- }
-
- if (CHUNK_SIZE_SETTING.exists(metadata.settings())) {
- chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
- } else {
- this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings());
- }
-
- final String path = BASE_PATH_SETTING.get(metadata.settings());
- if (Strings.hasLength(path)) {
- basePath = new BlobPath().add(path);
- } else {
- this.basePath = BlobPath.cleanPath();
- }
+ validateLocation();
+ readMetadata();
}
}
diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 889d0dc6ddb14..9a8446d33415b 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -851,14 +851,7 @@ private static class MeteredRepositoryTypeA extends MeteredBlobStoreRepository {
private final TestCryptoProvider cryptoHandler;
private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clusterService) {
- super(
- metadata,
- false,
- mock(NamedXContentRegistry.class),
- clusterService,
- mock(RecoverySettings.class),
- Map.of("bucket", "bucket-a")
- );
+ super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-a"));
if (metadata.cryptoMetadata() != null) {
cryptoHandler = new TestCryptoProvider(
@@ -892,14 +885,7 @@ private static class MeteredRepositoryTypeB extends MeteredBlobStoreRepository {
private final TestCryptoProvider cryptoHandler;
private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clusterService) {
- super(
- metadata,
- false,
- mock(NamedXContentRegistry.class),
- clusterService,
- mock(RecoverySettings.class),
- Map.of("bucket", "bucket-b")
- );
+ super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-b"));
if (metadata.cryptoMetadata() != null) {
cryptoHandler = new TestCryptoProvider(
diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
index e097c7025e4fe..9c65ad32fa6a6 100644
--- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
+++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
@@ -252,7 +252,7 @@ public void testBadChunksize() throws Exception {
);
}
- public void testFsRepositoryCompressDeprecated() {
+ public void testFsRepositoryCompressDeprecatedIgnored() {
final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings());
final Settings settings = Settings.builder().put(node().settings()).put("location", location).build();
final RepositoryMetadata metadata = new RepositoryMetadata("test-repo", REPO_TYPE, settings);
@@ -265,10 +265,7 @@ public void testFsRepositoryCompressDeprecated() {
new FsRepository(metadata, useCompressEnvironment, null, BlobStoreTestUtil.mockClusterService(), null);
- assertWarnings(
- "[repositories.fs.compress] setting was deprecated in OpenSearch and will be removed in a future release!"
- + " See the breaking changes documentation for the next major version."
- );
+ assertNoDeprecationWarnings();
}
private static void writeIndexGen(BlobStoreRepository repository, RepositoryData repositoryData, long generation) throws Exception {
diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java
index ca8bec469f3bc..f9388c9e4b86e 100644
--- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java
+++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java
@@ -90,7 +90,7 @@ public MockEventuallyConsistentRepository(
final Context context,
final Random random
) {
- super(metadata, false, namedXContentRegistry, clusterService, recoverySettings);
+ super(metadata, namedXContentRegistry, clusterService, recoverySettings);
this.context = context;
this.namedXContentRegistry = namedXContentRegistry;
this.random = random;