diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java
index b02b3c45a..d6a449226 100644
--- a/core/src/main/java/com/scalar/db/common/error/CoreError.java
+++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java
@@ -941,6 +941,18 @@ public enum CoreError implements ScalarDbError {
       "Handling the before-preparation snapshot hook failed. Details: %s",
       "",
       ""),
+  DATA_LOADER_ERROR_CRUD_EXCEPTION(
+      Category.INTERNAL_ERROR,
+      "0047",
+      "Something went wrong while trying to save the data. Details: %s",
+      "",
+      ""),
+  DATA_LOADER_ERROR_SCAN(
+      Category.INTERNAL_ERROR,
+      "0048",
+      "Something went wrong while scanning. Are you sure you are running in the correct transaction mode? Details: %s",
+      "",
+      ""),
 
   //
   // Errors for the unknown transaction status error category
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDao.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDao.java
new file mode 100644
index 000000000..e7270de8e
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDao.java
@@ -0,0 +1,421 @@
+package com.scalar.db.dataloader.core.dataimport.dao;
+
+import com.scalar.db.api.*;
+import com.scalar.db.api.PutBuilder.Buildable;
+import com.scalar.db.common.error.CoreError;
+import com.scalar.db.dataloader.core.ScanRange;
+import com.scalar.db.exception.storage.ExecutionException;
+import com.scalar.db.exception.transaction.CrudException;
+import com.scalar.db.io.Column;
+import com.scalar.db.io.Key;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** The generic DAO that is used to read, write, and scan ScalarDB data */
+public class ScalarDBDao {
+
+  /* Class logger */
+  private static final Logger logger = LoggerFactory.getLogger(ScalarDBDao.class);
+  private static final String GET_COMPLETED_MSG = "GET completed for %s";
+  private static final String PUT_COMPLETED_MSG = "PUT completed for %s";
+  private static final String SCAN_START_MSG = "SCAN started...";
+  private static final String SCAN_END_MSG = "SCAN completed";
+
+  /**
+   * Retrieve a record from a ScalarDB instance in storage mode
+   *
+   * @param namespace Namespace name
+   * @param table Table name
+   * @param partitionKey Partition key
+   * @param clusteringKey Optional clustering key for get
+   * @param storage Distributed storage for ScalarDB connection that is running in storage mode
+   * @return Optional get result
+   * @throws ScalarDBDaoException if something goes wrong while reading the data
+   */
+  public Optional<Result> get(
+      String namespace,
+      String table,
+      Key partitionKey,
+      Key clusteringKey,
+      DistributedStorage storage)
+      throws ScalarDBDaoException {
+
+    // Retrieving the key data for logging
+    String loggingKey = keysToString(partitionKey, clusteringKey);
+
+    try {
+      Get get = createGetWith(namespace, table, partitionKey, clusteringKey);
+      Optional<Result> result = storage.get(get);
+      logger.info(String.format(GET_COMPLETED_MSG, loggingKey));
+      return result;
+    } catch (ExecutionException e) {
+      throw new ScalarDBDaoException("error GET " + loggingKey, e);
+    }
+  }
+
+  /**
+   * Retrieve a record from a ScalarDB instance in transaction mode
+   *
+   * @param namespace Namespace name
+   * @param table Table name
+   * @param partitionKey Partition key
+   * @param clusteringKey Optional clustering key for get
+   * @param transaction ScalarDB transaction instance
+   * @return Optional get result
+   * @throws ScalarDBDaoException if something goes wrong while reading the data
+   */
+  public Optional<Result> get(
+      String namespace,
+      String table,
+      Key partitionKey,
+      Key clusteringKey,
+      DistributedTransaction transaction)
+      throws ScalarDBDaoException {
+
+    Get get = createGetWith(namespace, table, partitionKey, clusteringKey);
+    // Retrieving the key data for logging
+    String loggingKey = keysToString(partitionKey, clusteringKey);
+    try {
+      Optional<Result> result = transaction.get(get);
+      logger.info(String.format(GET_COMPLETED_MSG, loggingKey));
+      return result;
+    } catch (CrudException e) {
+      throw new ScalarDBDaoException("error GET " + loggingKey, e.getCause());
+    }
+  }
+
+  /**
+   * Save a record in a ScalarDB instance in transaction mode
+   *
+   * @param namespace Namespace name
+   * @param table Table name
+   * @param partitionKey Partition key
+   * @param clusteringKey Optional clustering key
+   * @param columns List of column values to be inserted or updated
+   * @param transaction ScalarDB transaction instance
+   * @throws ScalarDBDaoException if something goes wrong while executing the transaction
+   */
+  public void put(
+      String namespace,
+      String table,
+      Key partitionKey,
+      Key clusteringKey,
+      List<Column<?>> columns,
+      DistributedTransaction transaction)
+      throws ScalarDBDaoException {
+
+    Put put = createPutWith(namespace, table, partitionKey, clusteringKey, columns);
+    try {
+      transaction.put(put);
+    } catch (CrudException e) {
+      throw new ScalarDBDaoException(
+          CoreError.DATA_LOADER_ERROR_CRUD_EXCEPTION.buildMessage(e.getMessage()), e);
+    }
+    logger.info(String.format(PUT_COMPLETED_MSG, keysToString(partitionKey, clusteringKey)));
+  }
+
+  /**
+   * Save a record in a ScalarDB instance in storage mode
+   *
+   * @param namespace Namespace name
+   * @param table Table name
+   * @param partitionKey Partition key
+   * @param clusteringKey Optional clustering key
+   * @param columns List of column values to be inserted or updated
+   * @param storage Distributed storage for ScalarDB connection that is running in storage mode
+   * @throws ScalarDBDaoException if something goes wrong while executing the put operation
+   */
+  public void put(
+      String namespace,
+      String table,
+      Key partitionKey,
+      Key clusteringKey,
+      List<Column<?>> columns,
+      DistributedStorage storage)
+      throws ScalarDBDaoException {
+    Put put = createPutWith(namespace, table, partitionKey, clusteringKey, columns);
+    try {
+      storage.put(put);
+    } catch (ExecutionException e) {
+      throw new ScalarDBDaoException(
+          CoreError.DATA_LOADER_ERROR_CRUD_EXCEPTION.buildMessage(e.getMessage()), e);
+    }
+    logger.info(String.format(PUT_COMPLETED_MSG, keysToString(partitionKey, clusteringKey)));
+  }
+
+  /**
+   * Scan a ScalarDB table in storage mode
+   *
+   * @param namespace ScalarDB namespace
+   * @param table ScalarDB table name
+   * @param partitionKey Partition key used in the ScalarDB scan
+   * @param range Optional range to set ScalarDB scan start and end values
+   * @param sorts Optional scan clustering key sorting values
+   * @param projections List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @param storage Distributed storage for ScalarDB connection that is running in storage mode
+   * @return List of ScalarDB scan results
+   * @throws ScalarDBDaoException if the scan fails
+   */
+  public List<Result> scan(
+      String namespace,
+      String table,
+      Key partitionKey,
+      ScanRange range,
+      List<Scan.Ordering> sorts,
+      List<String> projections,
+      int limit,
+      DistributedStorage storage)
+      throws ScalarDBDaoException {
+    // Create scan
+    Scan scan = createScan(namespace, table, partitionKey, range, sorts, projections, limit);
+
+    // Scan data
+    try {
+      logger.info(SCAN_START_MSG);
+      Scanner scanner = storage.scan(scan);
+      List<Result> allResults = scanner.all();
+      scanner.close();
+      logger.info(SCAN_END_MSG);
+      return allResults;
+    } catch (ExecutionException | IOException e) {
+      throw new ScalarDBDaoException(
+          CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
+    }
+  }
+
+  /**
+   * Scan a ScalarDB table in transaction mode
+   *
+   * @param namespace ScalarDB namespace
+   * @param table ScalarDB table name
+   * @param partitionKey Partition key used in the ScalarDB scan
+   * @param range Optional range to set ScalarDB scan start and end values
+   * @param sorts Optional scan clustering key sorting values
+   * @param projections List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @param transaction ScalarDB transaction instance that is running in transaction mode
+   * @return List of ScalarDB scan results
+   * @throws ScalarDBDaoException if the scan fails
+   */
+  public List<Result> scan(
+      String namespace,
+      String table,
+      Key partitionKey,
+      ScanRange range,
+      List<Scan.Ordering> sorts,
+      List<String> projections,
+      int limit,
+      DistributedTransaction transaction)
+      throws ScalarDBDaoException {
+
+    // Create scan
+    Scan scan = createScan(namespace, table, partitionKey, range, sorts, projections, limit);
+
+    // Scan data
+    try {
+      logger.info(SCAN_START_MSG);
+      List<Result> results = transaction.scan(scan);
+      logger.info(SCAN_END_MSG);
+      return results;
+    } catch (CrudException | NoSuchElementException e) {
+      // A NoSuchElementException is thrown when the scan is executed in transaction mode but
+      // ScalarDB is running in storage mode
+      throw new ScalarDBDaoException(
+          CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
+    }
+  }
+
+  /**
+   * Create a ScalarDB scanner instance
+   *
+   * @param namespace ScalarDB namespace
+   * @param table ScalarDB table name
+   * @param projectionColumns List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @param storage Distributed storage for ScalarDB connection that is running in storage mode
+   * @return ScalarDB Scanner object
+   * @throws ScalarDBDaoException if the scan fails
+   */
+  public Scanner createScanner(
+      String namespace,
+      String table,
+      List<String> projectionColumns,
+      int limit,
+      DistributedStorage storage)
+      throws ScalarDBDaoException {
+    Scan scan =
+        createScan(namespace, table, null, null, new ArrayList<>(), projectionColumns, limit);
+    try {
+      return storage.scan(scan);
+    } catch (ExecutionException e) {
+      throw new ScalarDBDaoException(
+          CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
+    }
+  }
+
+  /**
+   * Create a ScalarDB scanner instance
+   *
+   * @param namespace ScalarDB namespace
+   * @param table ScalarDB table name
+   * @param partitionKey Partition key used in the ScalarDB scan
+   * @param scanRange Optional range to set ScalarDB scan start and end values
+   * @param sortOrders Optional scan clustering key sorting values
+   * @param projectionColumns List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @param storage Distributed storage for ScalarDB connection that is running in storage mode
+   * @return ScalarDB Scanner object
+   * @throws ScalarDBDaoException if the scan fails
+   */
+  public Scanner createScanner(
+      String namespace,
+      String table,
+      Key partitionKey,
+      ScanRange scanRange,
+      List<Scan.Ordering> sortOrders,
+      List<String> projectionColumns,
+      int limit,
+      DistributedStorage storage)
+      throws ScalarDBDaoException {
+    Scan scan =
+        createScan(namespace, table, partitionKey, scanRange, sortOrders, projectionColumns, limit);
+    try {
+      return storage.scan(scan);
+    } catch (ExecutionException e) {
+      throw new ScalarDBDaoException(
+          CoreError.DATA_LOADER_ERROR_SCAN.buildMessage(e.getMessage()), e);
+    }
+  }
+
+  /**
+   * Create a ScalarDB scan instance
+   *
+   * @param namespace ScalarDB namespace
+   * @param table ScalarDB table name
+   * @param partitionKey Partition key used in the ScalarDB scan
+   * @param scanRange Optional range to set ScalarDB scan start and end values
+   * @param sortOrders Optional scan clustering key sorting values
+   * @param projectionColumns List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @return ScalarDB scan instance
+   */
+  Scan createScan(
+      String namespace,
+      String table,
+      Key partitionKey,
+      ScanRange scanRange,
+      List<Scan.Ordering> sortOrders,
+      List<String> projectionColumns,
+      int limit) {
+    // If no partition key is provided, a scan-all is created
+    if (partitionKey == null) {
+      ScanBuilder.BuildableScanAll buildableScanAll =
+          Scan.newBuilder().namespace(namespace).table(table).all();
+
+      // Projection columns
+      if (projectionColumns != null && !projectionColumns.isEmpty()) {
+        buildableScanAll.projections(projectionColumns);
+      }
+
+      // Limit
+      if (limit > 0) {
+        buildableScanAll.limit(limit);
+      }
+      return buildableScanAll.build();
+    }
+
+    // Create a scan with a partition key (not a scan-all)
+    ScanBuilder.BuildableScan buildableScan =
+        Scan.newBuilder().namespace(namespace).table(table).partitionKey(partitionKey);
+
+    // Set the scan boundary
+    if (scanRange != null) {
+      // Set boundary start
+      if (scanRange.getScanStartKey() != null) {
+        buildableScan.start(scanRange.getScanStartKey(), scanRange.isStartInclusive());
+      }
+
+      // Set boundary end
+      if (scanRange.getScanEndKey() != null) {
+        buildableScan.end(scanRange.getScanEndKey(), scanRange.isEndInclusive());
+      }
+    }
+
+    // Clustering order
+    for (Scan.Ordering sort : sortOrders) {
+      buildableScan.ordering(sort);
+    }
+
+    // Projections
+    if (projectionColumns != null && !projectionColumns.isEmpty()) {
+      buildableScan.projections(projectionColumns);
+    }
+
+    // Limit
+    if (limit > 0) {
+      buildableScan.limit(limit);
+    }
+    return buildableScan.build();
+  }
+
+  /**
+   * Return a ScalarDB get based on the provided parameters
+   *
+   * @param namespace Namespace name
+   * @param table Table name
+   * @param partitionKey Partition key
+   * @param clusteringKey Optional clustering key for get
+   * @return ScalarDB Get instance
+   */
+  private Get createGetWith(String namespace, String table, Key partitionKey, Key clusteringKey) {
+    GetBuilder.BuildableGetWithPartitionKey buildable =
+        Get.newBuilder().namespace(namespace).table(table).partitionKey(partitionKey);
+    if (clusteringKey != null) {
+      buildable.clusteringKey(clusteringKey);
+    }
+    return buildable.build();
+  }
+
+  /**
+   * Return a ScalarDB put based on the provided parameters
+   *
+   * @param namespace Namespace name
+   * @param table Table name
+   * @param partitionKey Partition key
+   * @param clusteringKey Optional clustering key
+   * @param columns List of column values
+   * @return ScalarDB Put instance
+   */
+  private Put createPutWith(
+      String namespace,
+      String table,
+      Key partitionKey,
+      Key clusteringKey,
+      List<Column<?>> columns) {
+    Buildable buildable =
+        Put.newBuilder().namespace(namespace).table(table).partitionKey(partitionKey);
+    if (clusteringKey != null) {
+      buildable.clusteringKey(clusteringKey);
+    }
+
+    for (Column<?> column : columns) {
+      buildable.value(column);
+    }
+    return buildable.build();
+  }
+
+  private String keysToString(Key partitionKey, Key clusteringKey) {
+    if (clusteringKey != null) {
+      return partitionKey.toString() + "," + clusteringKey;
+    } else {
+      return partitionKey.toString();
+    }
+  }
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDaoException.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDaoException.java
new file mode 100644
index 000000000..1e50affb0
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDaoException.java
@@ -0,0 +1,15 @@
+package com.scalar.db.dataloader.core.dataimport.dao;
+
+/** A custom DAO exception that encapsulates errors thrown by ScalarDB operations */
+public class ScalarDBDaoException extends Exception {
+
+  /**
+   * Class constructor
+   *
+   * @param message error message
+   * @param cause reason for the exception
+   */
+  public ScalarDBDaoException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBManager.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBManager.java
new file mode 100644
index 000000000..1016eaaba
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBManager.java
@@ -0,0 +1,68 @@
+package com.scalar.db.dataloader.core.dataimport.dao;
+
+import com.scalar.db.api.DistributedStorage;
+import com.scalar.db.api.DistributedStorageAdmin;
+import com.scalar.db.api.DistributedTransactionAdmin;
+import com.scalar.db.api.DistributedTransactionManager;
+import com.scalar.db.service.StorageFactory;
+import com.scalar.db.service.TransactionFactory;
+import java.io.IOException;
+import javax.annotation.Nullable;
+
+/**
+ * A manager to retrieve the various ScalarDB managers based on the running mode
+ *
+ * @author Yves Peckstadt
+ */
+public class ScalarDBManager {
+
+  /* Distributed storage for ScalarDB connection that is running in storage mode */
+  @Nullable private final DistributedStorage storage;
+  /* Distributed Transaction manager for ScalarDB connection that is running in transaction mode */
+  private final DistributedTransactionManager transactionManager;
+  /* Distributed storage admin for ScalarDB admin operations */
+  private final DistributedStorageAdmin storageAdmin;
+  private final DistributedTransactionAdmin transactionAdmin;
+
+  /**
+   * Class constructor
+   *
+   * @param storageFactory Factory to create all the necessary ScalarDB data managers
+   * @throws IOException if the ScalarDB storage managers cannot be created
+   */
+  public ScalarDBManager(StorageFactory storageFactory) throws IOException {
+    storage = storageFactory.getStorage();
+    storageAdmin = storageFactory.getStorageAdmin();
+    transactionManager = null;
+    transactionAdmin = null;
+  }
+
+  /**
+   * Class constructor
+   *
+   * @param transactionFactory Factory to create all the necessary ScalarDB data managers
+   * @throws IOException if the ScalarDB transaction managers cannot be created
+   */
+  public ScalarDBManager(TransactionFactory transactionFactory) throws IOException {
+    transactionManager = transactionFactory.getTransactionManager();
+    transactionAdmin = transactionFactory.getTransactionAdmin();
+    storageAdmin = null;
+    storage = null;
+  }
+
+  /** @return storage for ScalarDB connection that is running in storage mode */
+  public DistributedStorage getDistributedStorage() {
+    return storage;
+  }
+
+  /**
+   * @return Distributed Transaction manager for ScalarDB connection that is running in transaction
+   *     mode
+   */
+  public DistributedTransactionManager getDistributedTransactionManager() {
+    return transactionManager;
+  }
+
+  /** @return Distributed storage admin for ScalarDB admin operations */
+  public DistributedStorageAdmin getDistributedStorageAdmin() {
+    return storageAdmin;
+  }
+}
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDaoTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDaoTest.java
new file mode 100644
index 000000000..c46843156
--- /dev/null
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataimport/dao/ScalarDBDaoTest.java
@@ -0,0 +1,225 @@
+package com.scalar.db.dataloader.core.dataimport.dao;
+
+import static com.scalar.db.dataloader.core.UnitTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.scalar.db.api.Scan;
+import com.scalar.db.api.ScanBuilder;
+import com.scalar.db.dataloader.core.ScanRange;
+import com.scalar.db.io.Key;
+import java.util.*;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class ScalarDBDaoTest {
+
+  private static final int TEST_VALUE_INT_MIN = 1;
+  private ScalarDBDao dao;
+
+  @BeforeEach
+  void setUp() {
+    this.dao = new ScalarDBDao();
+  }
+
+  @Test
+  void createScan_scanWithPartitionKey_shouldCreateScanObjectWithPartitionKey() {
+
+    // Create Scan object
+    Scan scan =
+        this.dao.createScan(
+            TEST_NAMESPACE,
+            TEST_TABLE_NAME,
+            Key.newBuilder().addBigInt(TEST_COLUMN_1_PK, TEST_VALUE_LONG).build(),
+            new ScanRange(null, null, false, false),
+            new ArrayList<>(),
+            new ArrayList<>(),
+            0);
+
+    // Create expected result
+    Scan expectedResult =
+        generateScanResult(
+            Key.newBuilder().addBigInt(TEST_COLUMN_1_PK, TEST_VALUE_LONG).build(),
+            new ScanRange(null, null, false, false),
+            new ArrayList<>(),
+            new ArrayList<>(),
+            0);
+
+    // Compare Scan objects
+    assertThat(scan.toString()).isEqualTo(expectedResult.toString());
+  }
+
+  @Test
+  void createScan_scanWithLimitAndProjection_shouldCreateScanObjectWithLimitAndProjection() {
+
+    // Create Scan object
+    Scan scan =
+        this.dao.createScan(
+            TEST_NAMESPACE,
+            TEST_TABLE_NAME,
+            Key.newBuilder().addBigInt(TEST_COLUMN_1_PK, TEST_VALUE_LONG).build(),
+            new ScanRange(null, null, false, false),
+            new ArrayList<>(),
+            Arrays.asList(TEST_COLUMN_4, TEST_COLUMN_5, TEST_COLUMN_6),
+            5);
+
+    // Create expected result
+    Scan expectedResult =
+        generateScanResult(
+            Key.newBuilder().addBigInt(TEST_COLUMN_1_PK, TEST_VALUE_LONG).build(),
+            new ScanRange(null, null, false, false),
+            new ArrayList<>(),
+            Arrays.asList(TEST_COLUMN_4, TEST_COLUMN_5, TEST_COLUMN_6),
+            5);
+
+    // Compare Scan objects
+    assertThat(scan.toString()).isEqualTo(expectedResult.toString());
+  }
+
+  @Test
+  void createScan_scanWithScanRangeAndOrder_shouldCreateScanObjectWithSortAndRange() {
+
+    // Create Scan object
+    Scan scan =
+        this.dao.createScan(
+            TEST_NAMESPACE,
+            TEST_TABLE_NAME,
+            Key.newBuilder().addBigInt(TEST_COLUMN_1_PK, TEST_VALUE_LONG).build(),
+            new ScanRange(
+                Key.newBuilder().addInt(TEST_COLUMN_2_CK, TEST_VALUE_INT_MIN).build(),
+                Key.newBuilder().addInt(TEST_COLUMN_2_CK, TEST_VALUE_INT).build(),
+                true,
+                false),
+            Arrays.asList(Scan.Ordering.asc(TEST_COLUMN_2_CK)),
+            new ArrayList<>(),
+            0);
+
+    // Create expected result
+    Scan expectedResult =
+        generateScanResult(
+            Key.newBuilder().addBigInt(TEST_COLUMN_1_PK, TEST_VALUE_LONG).build(),
+            new ScanRange(
+                Key.newBuilder().addInt(TEST_COLUMN_2_CK, TEST_VALUE_INT_MIN).build(),
+                Key.newBuilder().addInt(TEST_COLUMN_2_CK, TEST_VALUE_INT).build(),
+                true,
+                false),
+            Arrays.asList(Scan.Ordering.asc(TEST_COLUMN_2_CK)),
+            new ArrayList<>(),
+            0);
+
+    // Compare Scan objects
+    assertThat(scan.toString()).isEqualTo(expectedResult.toString());
+  }
+
+  @Test
+  void createScan_scanWithoutPartitionKey_shouldCreateScanAllObject() {
+
+    // Create Scan object
+    Scan scan =
+        this.dao.createScan(
+            TEST_NAMESPACE,
+            TEST_TABLE_NAME,
+            null,
+            new ScanRange(null, null, false, false),
+            new ArrayList<>(),
+            new ArrayList<>(),
+            0);
+
+    // Create expected result
+    Scan expectedResult = generateScanAllResult(new ArrayList<>(), 0);
+
+    // Compare ScanAll objects
+    assertThat(scan.toString()).isEqualTo(expectedResult.toString());
+  }
+
+  @Test
+  void createScan_scanAllWithLimitAndProjection_shouldCreateScanAllObjectWithLimitAndProjection() {
+
+    // Create Scan object
+    Scan scan =
+        this.dao.createScan(
+            TEST_NAMESPACE,
+            TEST_TABLE_NAME,
+            null,
+            new ScanRange(null, null, false, false),
+            new ArrayList<>(),
+            Arrays.asList(TEST_COLUMN_4, TEST_COLUMN_5, TEST_COLUMN_6),
+            5);
+
+    // Create expected result
+    Scan expectedResult =
+        generateScanAllResult(Arrays.asList(TEST_COLUMN_4, TEST_COLUMN_5, TEST_COLUMN_6), 5);
+
+    // Compare ScanAll objects
+    assertThat(scan.toString()).isEqualTo(expectedResult.toString());
+  }
+
+  /**
+   * Create the expected Scan object
+   *
+   * @param partitionKey Partition key used in the ScalarDB scan
+   * @param range Optional range to set ScalarDB scan start and end values
+   * @param sorts Optional scan clustering key sorting values
+   * @param projections List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @return ScalarDB scan instance
+   */
+  private Scan generateScanResult(
+      Key partitionKey,
+      ScanRange range,
+      List<Scan.Ordering> sorts,
+      List<String> projections,
+      int limit) {
+    ScanBuilder.BuildableScan scan =
+        Scan.newBuilder()
+            .namespace(TEST_NAMESPACE)
+            .table(TEST_TABLE_NAME)
+            .partitionKey(partitionKey);
+
+    // Set boundary start
+    if (range.getScanStartKey() != null) {
+      scan.start(range.getScanStartKey(), range.isStartInclusive());
+    }
+
+    // Set boundary end
+    if (range.getScanEndKey() != null) {
+      scan.end(range.getScanEndKey(), range.isEndInclusive());
+    }
+
+    // Clustering order
+    for (Scan.Ordering sort : sorts) {
+      scan.ordering(sort);
+    }
+
+    // Projections
+    if (projections != null && !projections.isEmpty()) {
+      scan.projections(projections);
+    }
+
+    // Limit
+    if (limit > 0) {
+      scan.limit(limit);
+    }
+    return scan.build();
+  }
+
+  /**
+   * Create the expected ScanAll object
+   *
+   * @param projections List of column projections to use during the scan
+   * @param limit Scan limit value
+   * @return ScalarDB scan instance
+   */
+  private Scan generateScanAllResult(List<String> projections, int limit) {
+    ScanBuilder.BuildableScanAll scan =
+        Scan.newBuilder().namespace(TEST_NAMESPACE).table(TEST_TABLE_NAME).all();
+
+    // Projections
+    if (projections != null && !projections.isEmpty()) {
+      scan.projections(projections);
+    }
+
+    // Limit
+    if (limit > 0) {
+      scan.limit(limit);
+    }
+    return scan.build();
+  }
+}
diff --git a/gradle/spotbugs-exclude.xml b/gradle/spotbugs-exclude.xml
index 172474047..0479b8fa2 100644
--- a/gradle/spotbugs-exclude.xml
+++ b/gradle/spotbugs-exclude.xml
@@ -36,7 +36,7 @@
-
+
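
For reviewers, a minimal usage sketch of the new DAO in storage mode follows. It is illustrative only and not part of the diff: the properties file path, namespace, table, and column names are assumptions, and it presumes StorageFactory.create accepts a configuration file path.

import com.scalar.db.api.DistributedStorage;
import com.scalar.db.api.Result;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBDao;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDBManager;
import com.scalar.db.io.Column;
import com.scalar.db.io.IntColumn;
import com.scalar.db.io.Key;
import com.scalar.db.service.StorageFactory;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class ScalarDBDaoUsageSketch {
  public static void main(String[] args) throws Exception {
    // Assumed config path; building the manager from a StorageFactory selects storage mode
    ScalarDBManager manager = new ScalarDBManager(StorageFactory.create("scalardb.properties"));
    DistributedStorage storage = manager.getDistributedStorage();
    ScalarDBDao dao = new ScalarDBDao();

    // Hypothetical "sample_ns.customer" table with bigint partition key "id" and int column "age"
    Key partitionKey = Key.newBuilder().addBigInt("id", 1L).build();
    List<Column<?>> columns = Collections.singletonList(IntColumn.of("age", 30));

    // Write a record, then read it back through the same DAO
    dao.put("sample_ns", "customer", partitionKey, null, columns, storage);
    Optional<Result> result = dao.get("sample_ns", "customer", partitionKey, null, storage);
    result.ifPresent(r -> System.out.println(r.getInt("age")));
  }
}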