diff --git a/hapi/hedera-protobufs/services/state/roster/roster.proto b/hapi/hedera-protobufs/services/state/roster/roster.proto
index 3d822062643b..d6b7f6ab1af7 100644
--- a/hapi/hedera-protobufs/services/state/roster/roster.proto
+++ b/hapi/hedera-protobufs/services/state/roster/roster.proto
@@ -37,7 +37,7 @@ message Roster {
* This list SHALL contain roster entries in natural order of ascending node ids.
* This list SHALL NOT be empty.
*/
- repeated RosterEntry rosters = 1;
+ repeated RosterEntry roster_entries = 1;
}
/**
diff --git a/hedera-node/configuration/dev/application.properties b/hedera-node/configuration/dev/application.properties
index f3cf6bb08774..56c01f9a71b1 100644
--- a/hedera-node/configuration/dev/application.properties
+++ b/hedera-node/configuration/dev/application.properties
@@ -2,6 +2,7 @@ balances.exportDir.path=data/accountBalances/
upgrade.artifacts.path=data/upgrade
contracts.chainId=298
contracts.maxGasPerSec=15000000000
+contracts.systemContract.tokenInfo.v2.enabled=true
# Needed for end-end tests running on mod-service code
staking.periodMins=1
staking.fees.nodeRewardPercentage=10
diff --git a/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/contracts/ParsingConstants.java b/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/contracts/ParsingConstants.java
index d79e6a599440..fb0d100a1418 100644
--- a/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/contracts/ParsingConstants.java
+++ b/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/contracts/ParsingConstants.java
@@ -60,6 +60,8 @@ private ParsingConstants() {
"(" + "string,string,address,string,bool,int64,bool," + TOKEN_KEY + ARRAY_BRACKETS + "," + EXPIRY + ")";
public static final String HEDERA_TOKEN_V3 =
"(" + "string,string,address,string,bool,int64,bool," + TOKEN_KEY + ARRAY_BRACKETS + "," + EXPIRY_V2 + ")";
+ public static final String HEDERA_TOKEN_V4 = "(" + "string,string,address,string,bool,uint32,bool," + TOKEN_KEY
+ + ARRAY_BRACKETS + "," + EXPIRY + ",bytes" + ")";
public static final String TOKEN_INFO = "("
+ HEDERA_TOKEN_V2
+ ",int64,bool,bool,bool,"
@@ -135,8 +137,11 @@ public enum FunctionType {
HAPI_TRANSFER_FROM_NFT,
HAPI_GET_APPROVED,
HAPI_GET_FUNGIBLE_TOKEN_INFO,
+ HAPI_GET_FUNGIBLE_TOKEN_INFO_V2,
HAPI_GET_TOKEN_INFO,
+ HAPI_GET_TOKEN_INFO_V2,
HAPI_GET_NON_FUNGIBLE_TOKEN_INFO,
+ HAPI_GET_NON_FUNGIBLE_TOKEN_INFO_V2,
HAPI_IS_APPROVED_FOR_ALL,
HAPI_IS_KYC,
GET_TOKEN_DEFAULT_FREEZE_STATUS,
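These signature constants are plain ABI tuple type strings, so a malformed one fails only at runtime when first parsed. A quick way to validate a new shape like `HEDERA_TOKEN_V4` is to parse it with the headlong ABI library used by the contracts modules; a minimal sketch, with the referenced `TOKEN_KEY` and `EXPIRY` values inlined for self-containment (the inlined copies are illustrative, not authoritative):

```java
import com.esaulpaugh.headlong.abi.TupleType;

public class TokenV4SignatureCheck {
    // Inlined stand-ins for ParsingConstants.TOKEN_KEY and ParsingConstants.EXPIRY
    private static final String TOKEN_KEY = "(uint256,(bool,address,bytes,bytes,address))";
    private static final String EXPIRY = "(uint32,address,uint32)";
    private static final String HEDERA_TOKEN_V4 =
            "(string,string,address,string,bool,uint32,bool," + TOKEN_KEY + "[]," + EXPIRY + ",bytes)";

    public static void main(String[] args) {
        // TupleType.parse throws IllegalArgumentException on a non-canonical signature
        TupleType tupleType = TupleType.parse(HEDERA_TOKEN_V4);
        System.out.println(tupleType.getCanonicalType());
    }
}
```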
diff --git a/hedera-node/hedera-app/build.gradle.kts b/hedera-node/hedera-app/build.gradle.kts
index e8ea30e9bf45..44776cc290de 100644
--- a/hedera-node/hedera-app/build.gradle.kts
+++ b/hedera-node/hedera-app/build.gradle.kts
@@ -165,7 +165,10 @@ val cleanRun =
tasks.clean { dependsOn(cleanRun) }
-tasks.register("showHapiVersion") { doLast { println(libs.versions.hapi.proto.get()) } }
+tasks.register("showHapiVersion") {
+ inputs.property("version", project.version)
+ doLast { println(inputs.properties["version"]) }
+}
var updateDockerEnvTask =
tasks.register("updateDockerEnv") {
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java
index f06d69f3c321..2f936bddc0aa 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java
@@ -16,6 +16,7 @@
package com.hedera.node.app;
+import static com.hedera.node.app.blocks.BlockStreamService.FAKE_RESTART_BLOCK_HASH;
import static com.hedera.node.app.info.UnavailableNetworkInfo.UNAVAILABLE_NETWORK_INFO;
import static com.hedera.node.app.records.schemas.V0490BlockRecordSchema.BLOCK_INFO_STATE_KEY;
import static com.hedera.node.app.state.merkle.VersionUtils.isSoOrdered;
@@ -36,6 +37,7 @@
import com.hedera.hapi.node.base.SemanticVersion;
import com.hedera.hapi.node.state.blockrecords.BlockInfo;
import com.hedera.hapi.util.HapiUtils;
+import com.hedera.node.app.blocks.BlockStreamManager;
import com.hedera.node.app.blocks.BlockStreamService;
import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener;
import com.hedera.node.app.blocks.impl.KVStateChangeListener;
@@ -69,6 +71,8 @@
import com.hedera.node.app.statedumpers.MerkleStateChild;
import com.hedera.node.app.store.ReadableStoreFactory;
import com.hedera.node.app.throttle.CongestionThrottleService;
+import com.hedera.node.app.tss.TssBaseService;
+import com.hedera.node.app.tss.impl.PlaceholderTssBaseService;
import com.hedera.node.app.version.HederaSoftwareVersion;
import com.hedera.node.app.version.ServicesSoftwareVersion;
import com.hedera.node.app.workflows.handle.HandleWorkflow;
@@ -190,6 +194,11 @@ public final class Hedera implements SwirldMain, PlatformStatusChangeListener {
*/
private final InstantSource instantSource;
+ /**
+ * The supplier for the TSS base service.
+ */
+ private final Supplier<TssBaseService> tssBaseServiceSupplier;
+
/**
* The contract service singleton, kept as a field here to avoid constructing twice
* (once in constructor to register schemas, again inside Dagger component).
@@ -202,6 +211,12 @@ public final class Hedera implements SwirldMain, PlatformStatusChangeListener {
*/
private final FileServiceImpl fileServiceImpl;
+ /**
+ * The block stream service singleton, kept as a field here to reuse information learned
+ * during the state migration phase in the later initialization phase.
+ */
+ private final BlockStreamService blockStreamService;
+
/**
* The bootstrap configuration provider for the network.
*/
@@ -268,14 +283,17 @@ public final class Hedera implements SwirldMain, PlatformStatusChangeListener {
* @param constructableRegistry the registry to register {@link RuntimeConstructable} factories with
* @param registryFactory the factory to use for creating the services registry
* @param migrator the migrator to use with the services
+ * @param tssBaseServiceSupplier the supplier for the TSS base service
*/
public Hedera(
@NonNull final ConstructableRegistry constructableRegistry,
@NonNull final ServicesRegistry.Factory registryFactory,
@NonNull final ServiceMigrator migrator,
- @NonNull final InstantSource instantSource) {
+ @NonNull final InstantSource instantSource,
+ @NonNull final Supplier<TssBaseService> tssBaseServiceSupplier) {
requireNonNull(registryFactory);
requireNonNull(constructableRegistry);
+ this.tssBaseServiceSupplier = requireNonNull(tssBaseServiceSupplier);
this.serviceMigrator = requireNonNull(migrator);
this.instantSource = requireNonNull(instantSource);
logger.info(
@@ -306,6 +324,7 @@ public Hedera(
new SignatureExpanderImpl(),
new SignatureVerifierImpl(CryptographyHolder.get())));
contractServiceImpl = new ContractServiceImpl(appContext);
+ blockStreamService = new BlockStreamService(bootstrapConfig);
// Register all service schema RuntimeConstructable factories before platform init
Set.of(
new EntityIdService(),
@@ -318,7 +337,7 @@ public Hedera(
new UtilServiceImpl(),
new RecordCacheService(),
new BlockRecordService(),
- new BlockStreamService(bootstrapConfig),
+ blockStreamService,
new FeeService(),
new CongestionThrottleService(),
new NetworkServiceImpl(),
@@ -775,6 +794,7 @@ private void initializeDagger(
@NonNull final InitTrigger trigger,
@NonNull final List<StateChanges.Builder> migrationStateChanges) {
final var notifications = platform.getNotificationEngine();
+ final var blockStreamEnabled = isBlockStreamEnabled();
// The Dagger component should be constructed every time we reach this point, even if
// it exists (this avoids any problems with mutable singleton state by reconstructing
// everything); but we must ensure the gRPC server in the old component is fully stopped,
@@ -784,6 +804,9 @@ private void initializeDagger(
notifications.unregister(PlatformStatusChangeListener.class, this);
notifications.unregister(ReconnectCompleteListener.class, daggerApp.reconnectListener());
notifications.unregister(StateWriteToDiskCompleteListener.class, daggerApp.stateWriteToDiskListener());
+ if (blockStreamEnabled) {
+ daggerApp.tssBaseService().unregisterLedgerSignatureConsumer(daggerApp.blockStreamManager());
+ }
}
// Fully qualified so as to not confuse javadoc
daggerApp = com.hedera.node.app.DaggerHederaInjectionComponent.builder()
@@ -804,12 +827,36 @@ private void initializeDagger(
.kvStateChangeListener(kvStateChangeListener)
.boundaryStateChangeListener(boundaryStateChangeListener)
.migrationStateChanges(migrationStateChanges)
+ .tssBaseService(tssBaseServiceSupplier.get())
.build();
// Initialize infrastructure for fees, exchange rates, and throttles from the working state
daggerApp.initializer().accept(state);
notifications.register(PlatformStatusChangeListener.class, this);
notifications.register(ReconnectCompleteListener.class, daggerApp.reconnectListener());
notifications.register(StateWriteToDiskCompleteListener.class, daggerApp.stateWriteToDiskListener());
+ if (blockStreamEnabled) {
+ daggerApp
+ .blockStreamManager()
+ .initLastBlockHash(
+ switch (trigger) {
+ case GENESIS -> BlockStreamManager.ZERO_BLOCK_HASH;
+ // FUTURE - get the actual last block hash from e.g. a reconnect teacher or disk
+ default -> blockStreamService
+ .migratedLastBlockHash()
+ .orElse(FAKE_RESTART_BLOCK_HASH);
+ });
+ daggerApp.tssBaseService().registerLedgerSignatureConsumer(daggerApp.blockStreamManager());
+ if (daggerApp.tssBaseService() instanceof PlaceholderTssBaseService placeholderTssBaseService) {
+ daggerApp.inject(placeholderTssBaseService);
+ }
+ }
+ }
+
+ private boolean isBlockStreamEnabled() {
+ return bootstrapConfigProvider
+ .getConfiguration()
+ .getConfigData(BlockStreamConfig.class)
+ .streamBlocks();
}
private static ServicesSoftwareVersion getNodeStartupVersion(@NonNull final Configuration config) {
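The `initLastBlockHash` switch above encodes the only three possibilities for the previous block hash at startup: genesis (no prior block), a state migrated from record streams (the schema captured the hash), or a restart/reconnect where the true hash is not yet transmitted (the fake placeholder). A minimal sketch of that selection in isolation; the trigger values besides `GENESIS` and all names here are stand-ins:

```java
import java.util.Optional;

enum InitTrigger { GENESIS, RESTART, RECONNECT }

final class LastBlockHashSelector {
    static final byte[] ZERO_BLOCK_HASH = new byte[48]; // all zeros, SHA-384 width

    static byte[] select(InitTrigger trigger, Optional<byte[]> migratedLastBlockHash, byte[] fakeRestartHash) {
        return switch (trigger) {
            // No block precedes the genesis block
            case GENESIS -> ZERO_BLOCK_HASH;
            // Otherwise prefer the hash captured during state migration, falling back to the
            // placeholder until a reconnect teacher or disk can supply the real one
            default -> migratedLastBlockHash.orElse(fakeRestartHash);
        };
    }
}
```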
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java
index d956e8fd7936..6895096b58a7 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java
@@ -47,6 +47,8 @@
import com.hedera.node.app.state.WorkingStateAccessor;
import com.hedera.node.app.throttle.ThrottleServiceManager;
import com.hedera.node.app.throttle.ThrottleServiceModule;
+import com.hedera.node.app.tss.TssBaseService;
+import com.hedera.node.app.tss.impl.PlaceholderTssBaseService;
import com.hedera.node.app.workflows.FacilityInitModule;
import com.hedera.node.app.workflows.WorkflowsInjectionModule;
import com.hedera.node.app.workflows.handle.HandleWorkflow;
@@ -132,6 +134,10 @@ public interface HederaInjectionComponent {
StoreMetricsService storeMetricsService();
+ TssBaseService tssBaseService();
+
+ void inject(PlaceholderTssBaseService placeholderTssBaseService);
+
@Component.Builder
interface Builder {
@BindsInstance
@@ -185,6 +191,9 @@ interface Builder {
@BindsInstance
Builder migrationStateChanges(List<StateChanges.Builder> migrationStateChanges);
+ @BindsInstance
+ Builder tssBaseService(TssBaseService tssBaseService);
+
HederaInjectionComponent build();
}
}
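The new `tssBaseService(...)` builder method uses Dagger's `@BindsInstance`, which binds an externally constructed object into the graph at component-build time instead of going through a module. A minimal self-contained sketch of the pattern; `ExampleService` and the component are hypothetical:

```java
import dagger.BindsInstance;
import dagger.Component;

interface ExampleService {}

@Component
interface ExampleComponent {
    ExampleService exampleService();

    @Component.Builder
    interface Builder {
        // The caller supplies an already-built instance; Dagger injects it anywhere
        // an ExampleService is requested in the graph
        @BindsInstance
        Builder exampleService(ExampleService service);

        ExampleComponent build();
    }
}
// Usage (DaggerExampleComponent is generated by Dagger's annotation processor;
// MyService is a hypothetical implementation):
// ExampleComponent c = DaggerExampleComponent.builder().exampleService(new MyService()).build();
```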
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java
index 459128029091..11124585fd48 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/ServicesMain.java
@@ -18,29 +18,45 @@
import static com.swirlds.common.io.utility.FileUtils.getAbsolutePath;
import static com.swirlds.common.io.utility.FileUtils.rethrowIO;
+import static com.swirlds.common.threading.manager.AdHocThreadManager.getStaticThreadManager;
import static com.swirlds.logging.legacy.LogMarker.EXCEPTION;
import static com.swirlds.platform.builder.PlatformBuildConstants.DEFAULT_CONFIG_FILE_NAME;
import static com.swirlds.platform.builder.PlatformBuildConstants.DEFAULT_SETTINGS_FILE_NAME;
+import static com.swirlds.platform.builder.internal.StaticPlatformBuilder.getMetricsProvider;
+import static com.swirlds.platform.builder.internal.StaticPlatformBuilder.setupGlobalMetrics;
+import static com.swirlds.platform.config.internal.PlatformConfigUtils.checkConfiguration;
+import static com.swirlds.platform.crypto.CryptoStatic.initNodeSecurity;
+import static com.swirlds.platform.state.signed.StartupStateUtils.getInitialState;
import static com.swirlds.platform.system.SystemExitCode.CONFIGURATION_ERROR;
import static com.swirlds.platform.system.SystemExitCode.NODE_ADDRESS_MISMATCH;
import static com.swirlds.platform.system.SystemExitUtils.exitSystem;
+import static com.swirlds.platform.system.address.AddressBookUtils.createRoster;
+import static com.swirlds.platform.system.address.AddressBookUtils.initializeAddressBook;
import static com.swirlds.platform.util.BootstrapUtils.checkNodesToRun;
import static com.swirlds.platform.util.BootstrapUtils.getNodesToRun;
import static java.util.Objects.requireNonNull;
import com.hedera.node.app.services.OrderedServiceMigrator;
import com.hedera.node.app.services.ServicesRegistryImpl;
+import com.hedera.node.app.tss.impl.PlaceholderTssBaseService;
import com.swirlds.base.time.Time;
import com.swirlds.common.constructable.ConstructableRegistry;
import com.swirlds.common.constructable.RuntimeConstructable;
+import com.swirlds.common.context.PlatformContext;
import com.swirlds.common.crypto.CryptographyFactory;
+import com.swirlds.common.crypto.CryptographyHolder;
+import com.swirlds.common.io.filesystem.FileSystemManager;
import com.swirlds.common.io.utility.FileUtils;
+import com.swirlds.common.io.utility.RecycleBin;
+import com.swirlds.common.merkle.crypto.MerkleCryptoFactory;
+import com.swirlds.common.merkle.crypto.MerkleCryptographyFactory;
import com.swirlds.common.platform.NodeId;
import com.swirlds.config.api.Configuration;
import com.swirlds.config.api.ConfigurationBuilder;
import com.swirlds.config.extensions.sources.SystemEnvironmentConfigSource;
import com.swirlds.config.extensions.sources.SystemPropertiesConfigSource;
import com.swirlds.platform.CommandLineArgs;
+import com.swirlds.platform.ParameterProvider;
import com.swirlds.platform.builder.PlatformBuilder;
import com.swirlds.platform.config.legacy.ConfigurationException;
import com.swirlds.platform.config.legacy.LegacyConfigProperties;
@@ -154,7 +170,7 @@ public static void main(final String... args) throws Exception {
// Determine which node to run locally
// Load config.txt address book file and parse address book
- final AddressBook addressBook = loadAddressBook(DEFAULT_CONFIG_FILE_NAME);
+ final AddressBook bootstrapAddressBook = loadAddressBook(DEFAULT_CONFIG_FILE_NAME);
// parse command line arguments
final CommandLineArgs commandLineArgs = CommandLineArgs.parse(args);
@@ -169,7 +185,7 @@ public static void main(final String... args) throws Exception {
// get the list of configured nodes from the address book
// for each node in the address book, check if it has a local IP (local to this computer)
// additionally if a command line arg is supplied then limit matching nodes to that node id
- final List<NodeId> nodesToRun = getNodesToRun(addressBook, commandLineArgs.localNodesToStart());
+ final List<NodeId> nodesToRun = getNodesToRun(bootstrapAddressBook, commandLineArgs.localNodesToStart());
// hard exit if no nodes are configured to run
checkNodesToRun(nodesToRun);
@@ -178,19 +194,58 @@ public static void main(final String... args) throws Exception {
final SoftwareVersion version = hedera.getSoftwareVersion();
logger.info("Starting node {} with version {}", selfId, version);
- final PlatformBuilder platformBuilder = PlatformBuilder.create(
- Hedera.APP_NAME,
- Hedera.SWIRLD_NAME,
+ final var configuration = buildConfiguration();
+ final var keysAndCerts =
+ initNodeSecurity(bootstrapAddressBook, configuration).get(selfId);
+
+ setupGlobalMetrics(configuration);
+ final var metrics = getMetricsProvider().createPlatformMetrics(selfId);
+ final var time = Time.getCurrent();
+ final var fileSystemManager = FileSystemManager.create(configuration);
+ final var recycleBin =
+ RecycleBin.create(metrics, configuration, getStaticThreadManager(), time, fileSystemManager, selfId);
+
+ final var cryptography = CryptographyFactory.create();
+ CryptographyHolder.set(cryptography);
+ // the AddressBook is not changed after this point, so we calculate the hash now
+ cryptography.digestSync(bootstrapAddressBook);
+
+ // Initialize the Merkle cryptography
+ final var merkleCryptography = MerkleCryptographyFactory.create(configuration, cryptography);
+ MerkleCryptoFactory.set(merkleCryptography);
+
+ // Create the platform context
+ final var platformContext = PlatformContext.create(
+ configuration,
+ Time.getCurrent(),
+ metrics,
+ cryptography,
+ FileSystemManager.create(configuration),
+ recycleBin,
+ merkleCryptography);
+ // Create initial state for the platform
+ final var initialState = getInitialState(
+ platformContext,
version,
hedera::newMerkleStateRoot,
SignedStateFileUtils::readState,
- selfId);
+ Hedera.APP_NAME,
+ Hedera.SWIRLD_NAME,
+ selfId,
+ bootstrapAddressBook);
+
+ // Initialize the address book and set on platform builder
+ final var addressBook =
+ initializeAddressBook(selfId, version, initialState, bootstrapAddressBook, platformContext);
- // Add additional configuration to the platform
- final Configuration configuration = buildConfiguration();
- platformBuilder.withConfiguration(configuration);
- platformBuilder.withCryptography(CryptographyFactory.create());
- platformBuilder.withTime(Time.getCurrent());
+ // Follow the Inversion of Control pattern by injecting all needed dependencies into the PlatformBuilder.
+ final var platformBuilder = PlatformBuilder.create(
+ Hedera.APP_NAME, Hedera.SWIRLD_NAME, version, initialState, selfId)
+ .withPlatformContext(platformContext)
+ .withConfiguration(configuration)
+ .withAddressBook(addressBook)
+ .withRoster(createRoster(addressBook))
+ .withKeysAndCerts(keysAndCerts);
// IMPORTANT: A surface-level reading of this method will undersell the centrality
// of the Hedera instance. It is actually omnipresent throughout both the startup
@@ -233,13 +288,15 @@ private static Configuration buildConfiguration() {
.withSource(SystemPropertiesConfigSource.getInstance());
rethrowIO(() ->
BootstrapUtils.setupConfigBuilder(configurationBuilder, getAbsolutePath(DEFAULT_SETTINGS_FILE_NAME)));
- return configurationBuilder.build();
+ final Configuration configuration = configurationBuilder.build();
+ checkConfiguration(configuration);
+ return configuration;
}
/**
* Selects the node to run locally from either the command line arguments or the address book.
*
- * @param nodesToRun the list of nodes configured to run based on the address book.
+ * @param nodesToRun the list of nodes configured to run based on the address book.
* @param localNodesToStart the node ids specified on the command line.
* @return the node which should be run locally.
* @throws ConfigurationException if more than one node would be started or the requested node is not configured.
@@ -288,6 +345,7 @@ private static AddressBook loadAddressBook(@NonNull final String addressBookPath
try {
final LegacyConfigProperties props =
LegacyConfigPropertiesLoader.loadConfigFile(FileUtils.getAbsolutePath(addressBookPath));
+ props.appConfig().ifPresent(c -> ParameterProvider.getInstance().setParameters(c.params()));
return props.getAddressBook();
} catch (final Exception e) {
logger.error(EXCEPTION.getMarker(), "Error loading address book", e);
@@ -301,6 +359,7 @@ private static Hedera newHedera() {
ConstructableRegistry.getInstance(),
ServicesRegistryImpl::new,
new OrderedServiceMigrator(),
- InstantSource.system());
+ InstantSource.system(),
+ PlaceholderTssBaseService::new);
}
}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java
index 6f156bfd349c..6b052a3aaf0d 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java
@@ -22,6 +22,7 @@
import com.swirlds.platform.system.Round;
import com.swirlds.state.State;
import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.function.BiConsumer;
/**
* Maintains the state and process objects needed to produce the block stream.
@@ -34,11 +35,22 @@
* Items written to the stream will be produced in the order they are written. The leaves of the input and output item
* Merkle trees will be in the order they are written.
*/
-public interface BlockStreamManager extends BlockRecordInfo {
+public interface BlockStreamManager extends BlockRecordInfo, BiConsumer<byte[], byte[]> {
+ Bytes ZERO_BLOCK_HASH = Bytes.wrap(new byte[48]);
+
+ /**
+ * Initializes the block stream manager with the hash of the last block incorporated in the
+ * startup state. If starting from genesis, this hash should be the
+ * {@link #ZERO_BLOCK_HASH}.
+ * @param blockHash the hash of the last block
+ */
+ void initLastBlockHash(@NonNull Bytes blockHash);
+
/**
* Updates the internal state of the block stream manager to reflect the start of a new round.
* @param round the round that has just started
* @param state the state of the network at the beginning of the round
+ * @throws IllegalStateException if the last block hash was not explicitly initialized
*/
void startRound(@NonNull Round round, @NonNull State state);
@@ -57,11 +69,4 @@ public interface BlockStreamManager extends BlockRecordInfo {
* @throws IllegalStateException if the stream is closed
*/
void writeItem(@NonNull BlockItem item);
-
- /**
- * Completes the block proof for the given block with the given signature.
- * @param blockNumber the number of the block to finish
- * @param signature the signature to use in the block proof
- */
- void finishBlockProof(long blockNumber, @NonNull Bytes signature);
}
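Extending `BiConsumer<byte[], byte[]>` lets the TSS layer hand `(blockHash, signature)` pairs straight to the manager with no bespoke callback type, and `initLastBlockHash` becomes an explicit precondition of `startRound`. A minimal sketch of that contract with simplified types (not the production implementation):

```java
import java.util.function.BiConsumer;

class SketchBlockStreamManager implements BiConsumer<byte[], byte[]> {
    private byte[] lastBlockHash; // must be set exactly once before the first round

    void initLastBlockHash(byte[] blockHash) {
        this.lastBlockHash = blockHash;
    }

    void startRound(long roundNum) {
        if (lastBlockHash == null) {
            throw new IllegalStateException("Last block hash must be initialized before starting a round");
        }
        // ... open a block whose header carries lastBlockHash as previousBlockHash ...
    }

    @Override
    public void accept(byte[] blockHash, byte[] signature) {
        // ... complete the proof of the pending block whose root hash equals blockHash ...
    }
}
```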
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamService.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamService.java
index c9ea329603a6..91eb6ed62279 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamService.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamService.java
@@ -21,20 +21,28 @@
import com.hedera.node.app.blocks.schemas.V0540BlockStreamSchema;
import com.hedera.node.config.data.BlockStreamConfig;
+import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.swirlds.config.api.Configuration;
import com.swirlds.state.spi.SchemaRegistry;
import com.swirlds.state.spi.Service;
import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Optional;
/**
* Service for BlockStreams implementation responsible for tracking state changes
* and writing them to a block
*/
public class BlockStreamService implements Service {
+ public static final Bytes FAKE_RESTART_BLOCK_HASH = Bytes.fromHex("abcd".repeat(24));
+
public static final String NAME = "BlockStreamService";
private final boolean enabled;
+ @Nullable
+ private Bytes migratedLastBlockHash;
+
/**
* Service constructor.
*/
@@ -52,7 +60,20 @@ public String getServiceName() {
public void registerSchemas(@NonNull final SchemaRegistry registry) {
requireNonNull(registry);
if (enabled) {
- registry.register(new V0540BlockStreamSchema());
+ registry.register(new V0540BlockStreamSchema(this::setMigratedLastBlockHash));
}
}
+
+ /**
+ * Returns the last block hash as migrated from a state that used record streams, or empty
+ * if there was no such hash observed during migration.
+ * @return the last block hash
+ */
+ public Optional<Bytes> migratedLastBlockHash() {
+ return Optional.ofNullable(migratedLastBlockHash);
+ }
+
+ private void setMigratedLastBlockHash(@NonNull final Bytes migratedLastBlockHash) {
+ this.migratedLastBlockHash = requireNonNull(migratedLastBlockHash);
+ }
}
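Note the handoff shape: the service passes a private setter into the schema it registers, and only reads the field back after migration has run. A minimal sketch of the same pattern, with `Bytes` simplified to `byte[]`:

```java
import java.util.Optional;
import java.util.function.Consumer;

final class MigratedHashCarrier {
    private byte[] migratedLastBlockHash;

    // Handed to the schema at registration time, like this::setMigratedLastBlockHash above
    Consumer<byte[]> migrationCallback() {
        return hash -> this.migratedLastBlockHash = hash;
    }

    // Read during later initialization; empty if migration never observed a hash
    Optional<byte[]> migratedLastBlockHash() {
        return Optional.ofNullable(migratedLastBlockHash);
    }
}
```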
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java
index 834aaa8ce3e2..050bc0e045ca 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java
@@ -215,6 +215,22 @@ public static Bytes appendHash(@NonNull final Bytes hash, @NonNull final Bytes h
return Bytes.wrap(newBytes);
}
+ /**
+ * Combines the given left and right hashes into a single parent hash.
+ * @param leftHash the left hash
+ * @param rightHash the right hash
+ * @return the combined hash
+ */
+ public static Bytes combine(@NonNull final Bytes leftHash, @NonNull final Bytes rightHash) {
+ return Bytes.wrap(combine(leftHash.toByteArray(), rightHash.toByteArray()));
+ }
+
+ /**
+ * Combines the given left and right hashes into a single parent hash.
+ * @param leftHash the left hash
+ * @param rightHash the right hash
+ * @return the combined hash
+ */
public static byte[] combine(final byte[] leftHash, final byte[] rightHash) {
try {
final var digest = MessageDigest.getInstance(DigestType.SHA_384.algorithmName());
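`combine` is the binary Merkle parent step: the parent hash is SHA-384 over the two child hashes. A standalone sketch, assuming the digest input is the left child followed by the right child:

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class HashCombineSketch {
    static byte[] combine(byte[] leftHash, byte[] rightHash) {
        try {
            MessageDigest digest = MessageDigest.getInstance("SHA-384");
            digest.update(leftHash);
            digest.update(rightHash);
            return digest.digest();
        } catch (NoSuchAlgorithmException impossible) {
            throw new IllegalStateException(impossible); // SHA-384 is always present
        }
    }

    public static void main(String[] args) {
        byte[] parent = combine(new byte[48], new byte[48]);
        System.out.println(parent.length); // 48, the SHA-384 output width
    }
}
```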
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java
index ebf4e495684f..8211b2fe68a7 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java
@@ -17,6 +17,8 @@
package com.hedera.node.app.blocks.impl;
import static com.hedera.hapi.node.base.BlockHashAlgorithm.SHA2_384;
+import static com.hedera.hapi.util.HapiUtils.asInstant;
+import static com.hedera.node.app.blocks.impl.BlockImplUtils.appendHash;
import static com.hedera.node.app.blocks.impl.BlockImplUtils.combine;
import static com.hedera.node.app.blocks.schemas.V0540BlockStreamSchema.BLOCK_STREAM_INFO_KEY;
import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf;
@@ -28,25 +30,27 @@
import com.hedera.hapi.block.stream.BlockItem;
import com.hedera.hapi.block.stream.BlockProof;
+import com.hedera.hapi.block.stream.MerkleSiblingHash;
import com.hedera.hapi.block.stream.output.BlockHeader;
import com.hedera.hapi.block.stream.output.TransactionResult;
import com.hedera.hapi.node.base.SemanticVersion;
import com.hedera.hapi.node.base.Timestamp;
import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
+import com.hedera.hapi.platform.state.PlatformState;
import com.hedera.node.app.blocks.BlockItemWriter;
import com.hedera.node.app.blocks.BlockStreamManager;
import com.hedera.node.app.blocks.BlockStreamService;
import com.hedera.node.app.blocks.StreamingTreeHasher;
import com.hedera.node.app.records.impl.BlockRecordInfoUtils;
+import com.hedera.node.app.tss.TssBaseService;
import com.hedera.node.config.ConfigProvider;
import com.hedera.node.config.data.BlockRecordStreamConfig;
import com.hedera.node.config.data.BlockStreamConfig;
-import com.hedera.node.config.data.HederaConfig;
import com.hedera.node.config.data.VersionConfig;
import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.swirlds.config.api.Configuration;
import com.swirlds.platform.state.service.PlatformStateService;
-import com.swirlds.platform.state.service.ReadablePlatformStateStore;
+import com.swirlds.platform.state.service.schemas.V0540PlatformStateSchema;
import com.swirlds.platform.system.Round;
import com.swirlds.state.State;
import com.swirlds.state.spi.CommittableWritableStates;
@@ -69,26 +73,25 @@
public class BlockStreamManagerImpl implements BlockStreamManager {
private static final Logger log = LogManager.getLogger(BlockStreamManagerImpl.class);
- private static final Bytes MOCK_HASH = Bytes.wrap(new byte[48]);
private static final int CHUNK_SIZE = 8;
private static final CompletableFuture<Bytes> MOCK_START_STATE_ROOT_HASH_FUTURE =
completedFuture(Bytes.wrap(new byte[48]));
private final int roundsPerBlock;
+ private final TssBaseService tssBaseService;
private final SemanticVersion hapiVersion;
- private final SemanticVersion nodeVersion;
private final ExecutorService executor;
- private final BlockHashManager blockHashManager;
- private final RunningHashManager runningHashManager;
private final Supplier<BlockItemWriter> writerSupplier;
private final BoundaryStateChangeListener boundaryStateChangeListener;
- // All this state is scoped to producing the block for the last-started round
+ private final BlockHashManager blockHashManager;
+ private final RunningHashManager runningHashManager;
+
+ // All this state is scoped to producing the current block
private long blockNumber;
// Set to the round number of the last round handled before entering a freeze period
private long freezeRoundNumber = -1;
- // FUTURE - initialize to the actual last block hash (this is only correct at genesis)
- private Bytes lastBlockHash = Bytes.wrap(new byte[48]);
+ private Bytes lastBlockHash;
private Instant blockTimestamp;
private BlockItemWriter writer;
private List<BlockItem> pendingItems;
@@ -101,15 +104,24 @@ public class BlockStreamManagerImpl implements BlockStreamManager {
*/
private CompletableFuture<Void> writeFuture = completedFuture(null);
+ // (FUTURE) Remove this once reconnect protocol also transmits the last block hash
+ private boolean appendRealHashes = false;
+
/**
* Represents a block pending completion by the block hash signature needed for its block proof.
*
- * @param blockNumber the block number
+ * @param number the block number
+ * @param blockHash the block hash
* @param proofBuilder the block proof builder
* @param writer the block item writer
+ * @param siblingHashes the sibling hashes needed for an indirect block proof of an earlier block
*/
private record PendingBlock(
- long blockNumber, @NonNull BlockProof.Builder proofBuilder, @NonNull BlockItemWriter writer) {}
+ long number,
+ @NonNull Bytes blockHash,
+ @NonNull BlockProof.Builder proofBuilder,
+ @NonNull BlockItemWriter writer,
+ @NonNull MerkleSiblingHash... siblingHashes) {}
/**
* A queue of blocks pending completion by the block hash signature needed for their block proofs.
@@ -121,23 +133,36 @@ public BlockStreamManagerImpl(
@NonNull final Supplier<BlockItemWriter> writerSupplier,
@NonNull final ExecutorService executor,
@NonNull final ConfigProvider configProvider,
+ @NonNull final TssBaseService tssBaseService,
@NonNull final BoundaryStateChangeListener boundaryStateChangeListener) {
this.writerSupplier = requireNonNull(writerSupplier);
this.executor = requireNonNull(executor);
+ this.tssBaseService = requireNonNull(tssBaseService);
this.boundaryStateChangeListener = requireNonNull(boundaryStateChangeListener);
- final var config = requireNonNull(configProvider).getConfiguration();
+ requireNonNull(configProvider);
+ final var config = configProvider.getConfiguration();
this.hapiVersion = hapiVersionFrom(config);
- this.nodeVersion = nodeVersionFrom(config);
this.roundsPerBlock = config.getConfigData(BlockStreamConfig.class).roundsPerBlock();
this.blockHashManager = new BlockHashManager(config);
this.runningHashManager = new RunningHashManager();
}
+ @Override
+ public void initLastBlockHash(@NonNull final Bytes blockHash) {
+ lastBlockHash = requireNonNull(blockHash);
+ }
+
@Override
public void startRound(@NonNull final Round round, @NonNull final State state) {
- // We will always close the block at the end of the freeze round, even if
- // its number would not otherwise trigger a block closing
- if (isFreezeRound(state, round)) {
+ if (lastBlockHash == null) {
+ throw new IllegalStateException("Last block hash must be initialized before starting a round");
+ }
+ final var platformState = state.getReadableStates(PlatformStateService.NAME)
+ .getSingleton(V0540PlatformStateSchema.PLATFORM_STATE_KEY)
+ .get();
+ requireNonNull(platformState);
+ if (isFreezeRound(platformState, round)) {
+ // Track freeze round numbers because they always end a block
freezeRoundNumber = round.getRoundNum();
}
if (writer == null) {
@@ -159,7 +184,7 @@ public void startRound(@NonNull final Round round, @NonNull final State state) {
.number(blockNumber)
.previousBlockHash(lastBlockHash)
.hashAlgorithm(SHA2_384)
- .softwareVersion(nodeVersion)
+ .softwareVersion(platformState.creationSoftwareVersionOrThrow())
.hapiProtoVersion(hapiVersion))
.build());
@@ -172,7 +197,8 @@ public void endRound(@NonNull final State state, final long roundNum) {
if (shouldCloseBlock(roundNum, roundsPerBlock)) {
final var writableState = state.getWritableStates(BlockStreamService.NAME);
final var blockStreamInfoState = writableState.getSingleton(BLOCK_STREAM_INFO_KEY);
- // Ensure all runningHashManager futures are complete
+ // Ensure runningHashManager futures include all result items and are completed
+ schedulePendingWork();
writeFuture.join();
// Commit the block stream info to state before flushing the boundary state changes
blockStreamInfoState.put(new BlockStreamInfo(
@@ -184,53 +210,28 @@ public void endRound(@NonNull final State state, final long roundNum) {
schedulePendingWork();
writeFuture.join();
- final var inputRootHash = inputTreeHasher.rootHash().join();
- final var outputRootHash = outputTreeHasher.rootHash().join();
+ final var inputHash = inputTreeHasher.rootHash().join();
+ final var outputHash = outputTreeHasher.rootHash().join();
final var blockStartStateHash = MOCK_START_STATE_ROOT_HASH_FUTURE.join();
- final var blockHash = computeBlockHash(lastBlockHash, inputRootHash, outputRootHash, blockStartStateHash);
- // FUTURE: sign the block hash and gossip our signature
-
- final var blockProofBuilder = BlockProof.newBuilder()
+ final var leftParent = combine(lastBlockHash, inputHash);
+ final var rightParent = combine(outputHash, blockStartStateHash);
+ final var blockHash = combine(leftParent, rightParent);
+ final var pendingProof = BlockProof.newBuilder()
.block(blockNumber)
.previousBlockRootHash(lastBlockHash)
.startOfBlockStateRootHash(blockStartStateHash);
- pendingBlocks.add(new PendingBlock(blockNumber, blockProofBuilder, writer));
+ pendingBlocks.add(new PendingBlock(
+ blockNumber,
+ blockHash,
+ pendingProof,
+ writer,
+ new MerkleSiblingHash(false, inputHash),
+ new MerkleSiblingHash(false, rightParent)));
// Update in-memory state to prepare for the next block
lastBlockHash = blockHash;
writer = null;
- // Simulate the completion of the block proof
- final long blockNumberToComplete = this.blockNumber;
- CompletableFuture.runAsync(
- () -> {
- try {
- finishBlockProof(blockNumberToComplete, Bytes.wrap(new byte[48]));
- } catch (Exception e) {
- log.error("Failed to finish proof for block {}", blockNumberToComplete, e);
- }
- },
- executor);
- }
- }
-
- /**
- * {@inheritDoc}
- * Synchronized to ensure that block proofs are always written in order, even in edge cases where multiple
- * pending block proofs become available at the same time.
- * @param blockNumber the number of the block to finish
- * @param signature the signature to use in the block proof
- */
- @Override
- public synchronized void finishBlockProof(final long blockNumber, @NonNull final Bytes signature) {
- requireNonNull(signature);
- while (!pendingBlocks.isEmpty() && pendingBlocks.peek().blockNumber() <= blockNumber) {
- final var block = pendingBlocks.poll();
- // Note the actual proof for an earlier block number awaiting proof will be more complicated than this
- final var proof = block.proofBuilder().blockSignature(signature).build();
- block.writer()
- .writeItem(BlockItem.PROTOBUF.toBytes(
- BlockItem.newBuilder().blockProof(proof).build()))
- .closeBlock();
+ tssBaseService.requestLedgerSignature(blockHash.toByteArray());
}
}
@@ -244,6 +245,10 @@ public void writeItem(@NonNull final BlockItem item) {
@Override
public @Nullable Bytes prngSeed() {
+ // Incorporate all pending results before returning the seed to guarantee
+ // no two consecutive transactions ever get the same seed
+ schedulePendingWork();
+ writeFuture.join();
final var seed = runningHashManager.nMinus3HashFuture.join();
return seed == null ? null : Bytes.wrap(seed);
}
@@ -263,6 +268,59 @@ public long blockNo() {
return blockHashManager.hashOfBlock(blockNo);
}
+ /**
+ * Synchronized to ensure that block proofs are always written in order, even in edge cases where multiple
+ * pending block proofs become available at the same time.
+ *
+ * @param message the block hash that was signed
+ * @param signature the signature to use in the block proof
+ */
+ @Override
+ public synchronized void accept(@NonNull final byte[] message, @NonNull final byte[] signature) {
+ // Find the block whose hash matches the signed message, tracking any sibling hashes
+ // needed for indirect proofs of earlier blocks along the way
+ long blockNumber = Long.MIN_VALUE;
+ boolean impliesIndirectProof = false;
+ final List<List<MerkleSiblingHash>> siblingHashes = new ArrayList<>();
+ final var blockHash = Bytes.wrap(message);
+ for (final var block : pendingBlocks) {
+ if (impliesIndirectProof) {
+ siblingHashes.add(List.of(block.siblingHashes()));
+ }
+ if (block.blockHash().equals(blockHash)) {
+ blockNumber = block.number();
+ break;
+ }
+ impliesIndirectProof = true;
+ }
+ if (blockNumber == Long.MIN_VALUE) {
+ log.info("Ignoring signature on already proven block hash '{}'", blockHash);
+ return;
+ }
+ // Write proofs for all pending blocks up to and including the signed block number
+ final var blockSignature = Bytes.wrap(signature);
+ while (!pendingBlocks.isEmpty() && pendingBlocks.peek().number() <= blockNumber) {
+ final var block = pendingBlocks.poll();
+ final var proof = block.proofBuilder()
+ .blockSignature(blockSignature)
+ .siblingHashes(siblingHashes.stream().flatMap(List::stream).toList());
+ block.writer()
+ .writeItem(BlockItem.PROTOBUF.toBytes(
+ BlockItem.newBuilder().blockProof(proof).build()))
+ .closeBlock();
+ if (block.number() != blockNumber) {
+ siblingHashes.removeFirst();
+ }
+ }
+ }
+
+ /**
+ * (FUTURE) Remove this after reconnect protocol also transmits the last block hash.
+ */
+ public void appendRealHashes() {
+ this.appendRealHashes = true;
+ }
+
private void schedulePendingWork() {
final var scheduledWork = new ScheduledWork(pendingItems);
final var pendingSerialization = CompletableFuture.supplyAsync(scheduledWork::serializeItems, executor);
@@ -270,16 +328,6 @@ private void schedulePendingWork() {
pendingItems = new ArrayList<>();
}
- private Bytes computeBlockHash(
- @NonNull final Bytes prevBlockHash,
- @NonNull final Bytes inputRootHash,
- @NonNull final Bytes outputRootHash,
- @NonNull final Bytes stateRootHash) {
- final var leftParent = combine(prevBlockHash.toByteArray(), inputRootHash.toByteArray());
- final var rightParent = combine(outputRootHash.toByteArray(), stateRootHash.toByteArray());
- return Bytes.wrap(combine(leftParent, rightParent));
- }
-
private @NonNull BlockStreamInfo blockStreamInfoFrom(@NonNull final State state) {
final var blockStreamInfoState =
state.getReadableStates(BlockStreamService.NAME).getSingleton(BLOCK_STREAM_INFO_KEY);
@@ -290,10 +338,11 @@ private boolean shouldCloseBlock(final long roundNumber, final int roundsPerBloc
return roundNumber % roundsPerBlock == 0 || roundNumber == freezeRoundNumber;
}
- private boolean isFreezeRound(@NonNull final State state, @NonNull final Round round) {
- final var platformState = new ReadablePlatformStateStore(state.getReadableStates(PlatformStateService.NAME));
+ private boolean isFreezeRound(@NonNull final PlatformState platformState, @NonNull final Round round) {
return isInFreezePeriod(
- round.getConsensusTimestamp(), platformState.getFreezeTime(), platformState.getLastFrozenTime());
+ round.getConsensusTimestamp(),
+ platformState.freezeTime() == null ? null : asInstant(platformState.freezeTime()),
+ platformState.lastFrozenTime() == null ? null : asInstant(platformState.lastFrozenTime()));
}
/**
@@ -361,18 +410,6 @@ public Void combineSerializedItems(@Nullable Void ignore, @NonNull final List
+ * <ul>
+ * <li>We never know the hash of the {@code N+1} block currently being created.
+ * <li>We start every block {@code N} by concatenating the {@code N-1} block hash to the trailing
+ * hashes up to block {@code N-2} that were in state at the end of block {@code N-1}.
+ * </ul>
*
* @param blockNo the block number
* @return the hash of the block with the given number, or null if it is not available
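The sibling hashes captured in each `PendingBlock` are what let a later signature prove an earlier block: block `N`'s hash is itself the previous-block-hash leaf of block `N+1`'s four-leaf tree, so holding each intermediate block's pair `(inputHash, combine(outputHash, stateHash))` lets a verifier climb from block `N`'s hash up to the signed root of block `N+k`. A minimal sketch of that climb, under the tree shape used in `endRound` above:

```java
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;

final class IndirectProofSketch {
    static byte[] combine(byte[] left, byte[] right) {
        try {
            MessageDigest digest = MessageDigest.getInstance("SHA-384");
            digest.update(left);
            digest.update(right);
            return digest.digest();
        } catch (NoSuchAlgorithmException impossible) {
            throw new IllegalStateException(impossible);
        }
    }

    /**
     * Recomputes the later signed block hash from an earlier block hash, given one
     * (inputHash, rightParent) sibling pair per intervening block.
     */
    static byte[] climb(byte[] earlierBlockHash, List<byte[][]> siblingPairs) {
        byte[] hash = earlierBlockHash;
        for (byte[][] pair : siblingPairs) {
            byte[] leftParent = combine(hash, pair[0]); // hash plays the previous-block-hash leaf
            hash = combine(leftParent, pair[1]);        // pair[1] = combine(outputHash, stateHash)
        }
        return hash; // matches the signed hash iff the sibling pairs are consistent
    }
}
```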
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchema.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchema.java
index 2f248cf545d6..5659885d460b 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchema.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchema.java
@@ -17,6 +17,7 @@
package com.hedera.node.app.blocks.schemas;
import static com.hedera.node.app.blocks.impl.BlockImplUtils.appendHash;
+import static com.hedera.node.app.records.impl.BlockRecordInfoUtils.blockHashByBlockNumber;
import static java.util.Objects.requireNonNull;
import com.hedera.hapi.node.base.SemanticVersion;
@@ -30,25 +31,20 @@
import com.swirlds.state.spi.StateDefinition;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.Set;
+import java.util.function.Consumer;
/**
- * Defines the schema for two forms of state,
+ * Defines the schema for state with two notable properties:
* <ul>
- * <li>State needed for a new or reconnected node to construct the next block exactly as will
+ * <li>It is needed for a new or reconnected node to construct the next block exactly as will
* nodes already in the network.
- * <li>State derived from the block stream, and hence the natural provenance of the same service
+ * <li>It is derived from the block stream, and hence the natural provenance of the same service
* that is managing and producing blocks.
* </ul>
* <p>
- * The two pieces of state in the first category are,
+ * The particular items with these properties are,
* <ul>
* <li>The number of the last completed block, which each node must increment in the next block.
- * <li>The hash of the last completed block, which each node must include in the header and proof
- * of the next block.
- * </ul>
- * <p>
- * State in the second category has three parts,
- * <ul>
* <li>The first consensus time of the last finished block, for comparison with the consensus
* time at the start of the current block. Depending on the elapsed period between these times,
* the network may deterministically choose to purge expired entities, adjust node stakes and
@@ -69,11 +65,14 @@ public class V0540BlockStreamSchema extends Schema {
private static final SemanticVersion VERSION =
SemanticVersion.newBuilder().major(0).minor(54).patch(0).build();
+ private final Consumer<Bytes> migratedBlockHashConsumer;
+
/**
* Schema constructor.
*/
- public V0540BlockStreamSchema() {
+ public V0540BlockStreamSchema(@NonNull final Consumer<Bytes> migratedBlockHashConsumer) {
super(VERSION);
+ this.migratedBlockHashConsumer = requireNonNull(migratedBlockHashConsumer);
}
@Override
@@ -94,10 +93,22 @@ public void migrate(@NonNull final MigrationContext ctx) {
(BlockInfo) requireNonNull(ctx.sharedValues().get(SHARED_BLOCK_RECORD_INFO));
final RunningHashes runningHashes =
(RunningHashes) requireNonNull(ctx.sharedValues().get(SHARED_RUNNING_HASHES));
+ // Note that it is impossible to put the hash of block N into a state that includes
+ // the state changes from block N, because the hash of block N is a function of exactly
+ // those state changes, so the act of putting the hash in state would change it. As a result,
+ // the correct way to migrate from a record stream-based state is to save its last
+ // block hash as the last block hash of the new state, and create a BlockStreamInfo with
+ // the remaining block hashes
+ final var lastBlockHash =
+ requireNonNull(blockHashByBlockNumber(blockInfo, blockInfo.lastBlockNumber()));
+ migratedBlockHashConsumer.accept(lastBlockHash);
+ final var trailingBlockHashes = blockInfo
+ .blockHashes()
+ .slice(lastBlockHash.length(), blockInfo.blockHashes().length() - lastBlockHash.length());
state.put(BlockStreamInfo.newBuilder()
.blockTime(blockInfo.firstConsTimeOfLastBlock())
.blockNumber(blockInfo.lastBlockNumber())
- .trailingBlockHashes(blockInfo.blockHashes())
+ .trailingBlockHashes(trailingBlockHashes)
.trailingOutputHashes(appendedHashes(runningHashes))
.build());
}
@@ -105,10 +116,10 @@ public void migrate(@NonNull final MigrationContext ctx) {
}
private Bytes appendedHashes(final RunningHashes runningHashes) {
- Bytes appendedHashes = Bytes.EMPTY;
- appendedHashes = appendHash(runningHashes.nMinus3RunningHash(), appendedHashes, 4);
- appendedHashes = appendHash(runningHashes.nMinus2RunningHash(), appendedHashes, 4);
- appendedHashes = appendHash(runningHashes.nMinus1RunningHash(), appendedHashes, 4);
- return appendHash(runningHashes.runningHash(), appendedHashes, 4);
+ var hashes = Bytes.EMPTY;
+ hashes = appendHash(runningHashes.nMinus3RunningHash(), hashes, 4);
+ hashes = appendHash(runningHashes.nMinus2RunningHash(), hashes, 4);
+ hashes = appendHash(runningHashes.nMinus1RunningHash(), hashes, 4);
+ return appendHash(runningHashes.runningHash(), hashes, 4);
}
}
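The slice call works because the trailing block hashes are fixed-width 48-byte values concatenated into a single buffer, so individual entries are addressed by offset arithmetic alone. A minimal sketch of the indexing, with `Bytes` simplified to `byte[]`:

```java
final class TrailingHashesSketch {
    static final int HASH_SIZE = 48; // SHA-384 output width

    // Mirrors blockHashByBlockNumber: the entry for blockNo, given the oldest buffered block
    static byte[] hashOf(byte[] concatenated, long oldestBlockNo, long blockNo) {
        int index = Math.toIntExact(blockNo - oldestBlockNo);
        byte[] hash = new byte[HASH_SIZE];
        System.arraycopy(concatenated, index * HASH_SIZE, hash, 0, HASH_SIZE);
        return hash;
    }

    // Mirrors the slice(HASH_SIZE, length - HASH_SIZE) call above: drop one leading entry
    static byte[] dropOldest(byte[] concatenated) {
        byte[] rest = new byte[concatenated.length - HASH_SIZE];
        System.arraycopy(concatenated, HASH_SIZE, rest, 0, rest.length);
        return rest;
    }
}
```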
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/records/impl/BlockRecordManagerImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/records/impl/BlockRecordManagerImpl.java
index 170956e57f42..f7523a1841b9 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/records/impl/BlockRecordManagerImpl.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/records/impl/BlockRecordManagerImpl.java
@@ -187,7 +187,7 @@ public boolean startUserTransaction(@NonNull final Instant consensusTime, @NonNu
// Also check to see if this is the first transaction we're handling after a freeze restart. If so, we also
// start a new block.
final var isFirstTransactionAfterFreezeRestart = platformState.freezeTime() != null
- && platformState.freezeTimeOrThrow().equals(platformState.freezeTime());
+ && platformState.freezeTimeOrThrow().equals(platformState.lastFrozenTime());
if (isFirstTransactionAfterFreezeRestart) {
new WritablePlatformStateStore(state.getWritableStates(PlatformStateService.NAME)).setFreezeTime(null);
}
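The bug fixed here: the old predicate compared `freezeTime` with itself, so it held whenever a freeze time was set at all. The corrected check compares it with `lastFrozenTime`, which the platform sets equal to `freezeTime` once the freeze actually completes. A minimal sketch of the fixed predicate:

```java
import java.time.Instant;

final class FreezeRestartCheckSketch {
    static boolean isFirstTransactionAfterFreezeRestart(Instant freezeTime, Instant lastFrozenTime) {
        // True only when a freeze was scheduled AND the platform recorded it as completed
        return freezeTime != null && freezeTime.equals(lastFrozenTime);
    }

    public static void main(String[] args) {
        Instant frozenAt = Instant.parse("2024-09-01T00:00:00Z");
        System.out.println(isFirstTransactionAfterFreezeRestart(frozenAt, frozenAt)); // true
        System.out.println(isFirstTransactionAfterFreezeRestart(null, frozenAt));     // false
    }
}
```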
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java
new file mode 100644
index 000000000000..b131df78c200
--- /dev/null
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/TssBaseService.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.tss;
+
+import com.swirlds.state.spi.Service;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.function.BiConsumer;
+
+/**
+ * The TssBaseService will attempt to generate TSS key material for any candidate roster that has been set,
+ * giving it a ledger id and the ability to generate ledger signatures that can be verified by the ledger id.
+ * Once the candidate roster has received its full TSS key material, it can be made available for adoption
+ * by the platform.
+ *
+ * The TssBaseService will also attempt to generate ledger signatures by aggregating share signatures produced by
+ * calling {@link #requestLedgerSignature(byte[])}.
+ */
+public interface TssBaseService extends Service {
+ String NAME = "TssBaseService";
+
+ @NonNull
+ @Override
+ default String getServiceName() {
+ return NAME;
+ }
+
+ /**
+ * Requests a ledger signature on a message hash. The ledger signature is computed asynchronously and returned
+ * to all consumers that have been registered through {@link #registerLedgerSignatureConsumer}.
+ *
+ * @param messageHash The hash of the message to be signed by the ledger.
+ */
+ void requestLedgerSignature(@NonNull byte[] messageHash);
+
+ /**
+ * Registers a consumer of the message hash and the ledger signature on the message hash.
+ *
+ * @param consumer the consumer of ledger signatures and message hashes.
+ */
+ void registerLedgerSignatureConsumer(@NonNull BiConsumer<byte[], byte[]> consumer);
+
+ /**
+ * Unregisters a consumer of the message hash and the ledger signature on the message hash.
+ *
+ * @param consumer the consumer of ledger signatures and message hashes to unregister.
+ */
+ void unregisterLedgerSignatureConsumer(@NonNull BiConsumer<byte[], byte[]> consumer);
+}
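Because signatures are delivered asynchronously to registered consumers, a caller that needs one inline has to bridge the callback itself. A usage sketch against the interface above; the latch-based bridging is for illustration only and ignores interleaved requests for other block hashes:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

final class LedgerSignatureExample {
    /** Requests a signature and blocks briefly for it; returns null on timeout. */
    static byte[] awaitSignature(TssBaseService service, byte[] messageHash) throws InterruptedException {
        final byte[][] result = new byte[1][];
        final CountDownLatch latch = new CountDownLatch(1);
        final BiConsumer<byte[], byte[]> consumer = (hash, signature) -> {
            result[0] = signature;
            latch.countDown();
        };
        service.registerLedgerSignatureConsumer(consumer);
        try {
            service.requestLedgerSignature(messageHash);
            latch.await(5, TimeUnit.SECONDS);
        } finally {
            service.unregisterLedgerSignatureConsumer(consumer);
        }
        return result[0];
    }
}
```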
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/impl/PlaceholderTssBaseService.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/impl/PlaceholderTssBaseService.java
new file mode 100644
index 000000000000..6c0017c91a1d
--- /dev/null
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/tss/impl/PlaceholderTssBaseService.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.tss.impl;
+
+import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf;
+import static java.util.Objects.requireNonNull;
+
+import com.hedera.node.app.tss.TssBaseService;
+import com.swirlds.common.utility.CommonUtils;
+import com.swirlds.state.spi.SchemaRegistry;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ExecutorService;
+import java.util.function.BiConsumer;
+import javax.inject.Inject;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Placeholder for the TSS base service, added to support testing production of indirect block proofs,
+ * cf. this issue.
+ */
+public class PlaceholderTssBaseService implements TssBaseService {
+ private static final Logger log = LogManager.getLogger(PlaceholderTssBaseService.class);
+
+ /**
+ * Copy-on-write list to avoid concurrent modification exceptions if a consumer unregisters
+ * itself in its callback.
+ */
+ private final List<BiConsumer<byte[], byte[]>> consumers = new CopyOnWriteArrayList<>();
+
+ private ExecutorService executor;
+
+ @Inject
+ public void setExecutor(@NonNull final ExecutorService executor) {
+ this.executor = requireNonNull(executor);
+ }
+
+ @Override
+ public void registerSchemas(@NonNull final SchemaRegistry registry) {
+ // FUTURE - add required schemas
+ }
+
+ @Override
+ public void requestLedgerSignature(@NonNull final byte[] messageHash) {
+ requireNonNull(messageHash);
+ requireNonNull(executor);
+ // The "signature" is a hash of the message hash
+ final var mockSignature = noThrowSha384HashOf(messageHash);
+ // Simulate asynchronous completion of the ledger signature
+ CompletableFuture.runAsync(
+ () -> consumers.forEach(consumer -> {
+ try {
+ consumer.accept(messageHash, mockSignature);
+ } catch (Exception e) {
+ log.error(
+ "Failed to provide signature {} on message {} to consumer {}",
+ CommonUtils.hex(mockSignature),
+ CommonUtils.hex(messageHash),
+ consumer,
+ e);
+ }
+ }),
+ executor);
+ }
+
+ @Override
+ public void registerLedgerSignatureConsumer(@NonNull final BiConsumer<byte[], byte[]> consumer) {
+ requireNonNull(consumer);
+ consumers.add(consumer);
+ }
+
+ @Override
+ public void unregisterLedgerSignatureConsumer(@NonNull final BiConsumer<byte[], byte[]> consumer) {
+ requireNonNull(consumer);
+ consumers.remove(consumer);
+ }
+}
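The `CopyOnWriteArrayList` choice documented above is load-bearing: a consumer that unregisters itself inside its own callback structurally modifies the list mid-iteration, which a plain `ArrayList`'s `forEach` rejects with `ConcurrentModificationException`. A minimal demonstration:

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;

final class SelfUnregisterDemo {
    public static void main(String[] args) {
        final List<BiConsumer<byte[], byte[]>> consumers = new CopyOnWriteArrayList<>();
        final BiConsumer<byte[], byte[]> oneShot = new BiConsumer<byte[], byte[]>() {
            @Override
            public void accept(byte[] message, byte[] signature) {
                // Safe here: forEach iterates an immutable snapshot, so removal cannot corrupt it
                // (with an ArrayList this would throw ConcurrentModificationException)
                consumers.remove(this);
            }
        };
        consumers.add(oneShot);
        consumers.forEach(c -> c.accept(new byte[48], new byte[48]));
        System.out.println(consumers.size()); // 0
    }
}
```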
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/version/HederaSoftwareVersion.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/version/HederaSoftwareVersion.java
index ae87b7764428..c8f44252f68c 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/version/HederaSoftwareVersion.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/version/HederaSoftwareVersion.java
@@ -39,7 +39,9 @@
* completely different from each other.
* <li>The Services version is the version of the node software itself.
The Services version is the version of the node software itself.
+ * This will be removed once we stop supporting 0.53.0 and earlier versions.
*/
+@Deprecated(forRemoval = true)
public class HederaSoftwareVersion implements SoftwareVersion {
public static final long CLASS_ID = 0x6f2b1bc2df8cbd0cL;
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java
index 96e414535a47..738b27c3ce5d 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java
@@ -363,7 +363,8 @@ private HandleOutput execute(@NonNull final UserTxn userTxn) {
dispatchProcessor.processDispatch(dispatch);
updateWorkflowMetrics(userTxn);
}
- final var handleOutput = userTxn.stack().buildHandleOutput(userTxn.consensusNow());
+ final var handleOutput =
+ userTxn.stack().buildHandleOutput(userTxn.consensusNow(), exchangeRateManager.exchangeRates());
// Note that we don't yet support producing ONLY blocks, because we haven't integrated
// translators from block items to records for answering queries
if (blockStreamConfig.streamRecords()) {
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/stack/SavepointStackImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/stack/SavepointStackImpl.java
index 63c6ad4a8bba..78dd643535c2 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/stack/SavepointStackImpl.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/stack/SavepointStackImpl.java
@@ -29,6 +29,7 @@
import com.hedera.hapi.block.stream.BlockItem;
import com.hedera.hapi.node.base.TransactionID;
+import com.hedera.hapi.node.transaction.ExchangeRateSet;
import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener;
import com.hedera.node.app.blocks.impl.KVStateChangeListener;
import com.hedera.node.app.blocks.impl.PairedStreamBuilder;
@@ -59,8 +60,6 @@
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
/**
* A stack of savepoints scoped to a dispatch. Each savepoint captures the state of the {@link State} at the time
@@ -68,7 +67,6 @@
* the stream builders created in the savepoint.
*/
public class SavepointStackImpl implements HandleContext.SavepointStack, State {
- private static final Logger log = LogManager.getLogger(SavepointStackImpl.class);
private final State state;
private final Deque<Savepoint> stack = new ArrayDeque<>();
private final Map<String, WritableStatesStack> writableStatesMap = new HashMap<>();
@@ -444,9 +442,11 @@ Savepoint peek() {
* Builds all the records for the user transaction.
*
* @param consensusTime consensus time of the transaction
+ * @param exchangeRates the active exchange rates
* @return the stream of records
*/
- public HandleOutput buildHandleOutput(@NonNull final Instant consensusTime) {
+ public HandleOutput buildHandleOutput(
+ @NonNull final Instant consensusTime, @NonNull final ExchangeRateSet exchangeRates) {
final List<BlockItem> blockItems;
Instant lastAssignedConsenusTime = consensusTime;
if (streamMode == RECORDS) {
@@ -481,10 +481,16 @@ public HandleOutput buildHandleOutput(@NonNull final Instant consensusTime) {
final var consensusNow = consensusTime.plusNanos((long) i - indexOfUserRecord);
lastAssignedConsenusTime = consensusNow;
builder.consensusTimestamp(consensusNow);
- if (i > indexOfUserRecord && builder.category() != SCHEDULED) {
- // Only set exchange rates on transactions preceding the user transaction, since
- // no subsequent child can change the exchange rate
- builder.parentConsensus(consensusTime).exchangeRate(null);
+ if (i > indexOfUserRecord) {
+ if (builder.category() != SCHEDULED) {
+ // Only set exchange rates on transactions preceding the user transaction, since
+ // no subsequent child can change the exchange rate
+ builder.parentConsensus(consensusTime).exchangeRate(null);
+ } else {
+ // But for backward compatibility keep setting rates on scheduled receipts, c.f.
+ // https://github.com/hashgraph/hedera-services/issues/15393
+ builder.exchangeRate(exchangeRates);
+ }
}
switch (streamMode) {
case RECORDS -> records.add(((RecordStreamBuilder) builder).build());
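One detail worth calling out in the hunk above: builder consensus times are fanned out in nanoseconds around the user transaction, via consensusTime.plusNanos((long) i - indexOfUserRecord). A standalone sketch of that assignment (plain java.time; the method name is hypothetical):

    import java.time.Instant;

    // Sketch: with indexOfUserRecord = 2, builder indexes 0..4 receive offsets
    // -2ns, -1ns, 0ns, +1ns, +2ns relative to the user transaction's consensus
    // time, so preceding children sort before it and following children after it.
    static Instant assignedConsensusTime(final Instant consensusTime, final int i, final int indexOfUserRecord) {
        return consensusTime.plusNanos((long) i - indexOfUserRecord);
    }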
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java
index 7b50038e619f..1eb3dc722971 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java
@@ -19,6 +19,7 @@
import com.hedera.node.app.authorization.AuthorizerInjectionModule;
import com.hedera.node.app.config.BootstrapConfigProviderImpl;
import com.hedera.node.app.config.ConfigProviderImpl;
+import com.hedera.node.app.fees.ExchangeRateManager;
import com.hedera.node.app.service.contract.impl.ContractServiceImpl;
import com.hedera.node.app.service.file.impl.FileServiceImpl;
import com.hedera.node.app.services.ServicesInjectionModule;
@@ -81,5 +82,7 @@ interface Builder {
StateNetworkInfo stateNetworkInfo();
+ ExchangeRateManager exchangeRateManager();
+
StandaloneDispatchFactory standaloneDispatchFactory();
}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java
index 7f09f43fb1dd..fd2d7df81f39 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java
@@ -16,6 +16,8 @@
package com.hedera.node.app.workflows.standalone;
+import static com.hedera.node.app.workflows.standalone.impl.NoopVerificationStrategies.NOOP_VERIFICATION_STRATEGIES;
+
import com.hedera.node.app.config.BootstrapConfigProviderImpl;
import com.hedera.node.app.config.ConfigProviderImpl;
import com.hedera.node.app.service.contract.impl.ContractServiceImpl;
@@ -52,11 +54,14 @@ public TransactionExecutor newExecutor(@NonNull final State state, @NonNull final Map<String, String> properties) {
final var executor = newExecutorComponent(properties);
executor.initializer().accept(state);
executor.stateNetworkInfo().initFrom(state);
+ final var exchangeRateManager = executor.exchangeRateManager();
return (transactionBody, consensusNow, operationTracers) -> {
final var dispatch = executor.standaloneDispatchFactory().newDispatch(state, transactionBody, consensusNow);
OPERATION_TRACERS.set(List.of(operationTracers));
executor.dispatchProcessor().processDispatch(dispatch);
- return dispatch.stack().buildHandleOutput(consensusNow).recordsOrThrow();
+ return dispatch.stack()
+ .buildHandleOutput(consensusNow, exchangeRateManager.exchangeRates())
+ .recordsOrThrow();
};
}
@@ -68,7 +73,8 @@ private ExecutorComponent newExecutorComponent(@NonNull final Map<String, String> properties) {
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/impl/NoopVerificationStrategies.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/impl/NoopVerificationStrategies.java
new file mode 100644
--- /dev/null
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/impl/NoopVerificationStrategies.java
+public enum NoopVerificationStrategies implements VerificationStrategies {
+ NOOP_VERIFICATION_STRATEGIES;
+
+ private static final VerificationStrategy NOOP_VERIFICATION_STRATEGY = key -> Decision.VALID;
+
+ @Override
+ public VerificationStrategy activatingOnlyContractKeysFor(
+ @NonNull final Address sender,
+ final boolean requiresDelegatePermission,
+ @NonNull final HederaNativeOperations nativeOperations) {
+ return NOOP_VERIFICATION_STRATEGY;
+ }
+}
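Callers reach the always-valid strategy through the enum's single method, so a standalone (replay or simulation) dispatch never fails a contract signature test. A usage sketch (sender and nativeOperations stand for whatever the embedding code already has in scope):

    // Sketch: the returned strategy answers Decision.VALID for every key,
    // regardless of sender address or delegate permission requirements.
    final VerificationStrategy strategy =
            NOOP_VERIFICATION_STRATEGIES.activatingOnlyContractKeysFor(sender, false, nativeOperations);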
diff --git a/hedera-node/hedera-app/src/main/java/module-info.java b/hedera-node/hedera-app/src/main/java/module-info.java
index dc8c5c2fcab5..eeeb435abd80 100644
--- a/hedera-node/hedera-app/src/main/java/module-info.java
+++ b/hedera-node/hedera-app/src/main/java/module-info.java
@@ -41,6 +41,7 @@
requires com.swirlds.merkledb;
requires com.swirlds.virtualmap;
requires com.google.common;
+ requires com.google.errorprone.annotations;
requires com.google.protobuf;
requires io.grpc.netty;
requires io.grpc;
@@ -48,6 +49,7 @@
requires io.netty.transport.classes.epoll;
requires io.netty.transport;
requires org.apache.commons.lang3;
+ requires org.hyperledger.besu.datatypes;
requires static com.github.spotbugs.annotations;
requires static com.google.auto.service;
requires static java.compiler;
@@ -106,6 +108,8 @@
exports com.hedera.node.app.blocks.impl;
exports com.hedera.node.app.workflows.handle.metric;
exports com.hedera.node.app.roster;
+ exports com.hedera.node.app.tss;
+ exports com.hedera.node.app.tss.impl;
provides ConfigurationExtension with
ServicesConfigExtension;
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/ServicesMainTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/ServicesMainTest.java
index cbb7c9add4e0..b366ba087f40 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/ServicesMainTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/ServicesMainTest.java
@@ -20,21 +20,36 @@
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mock.Strictness.LENIENT;
import static org.mockito.Mockito.mockStatic;
import com.hedera.node.app.version.ServicesSoftwareVersion;
+import com.swirlds.common.context.PlatformContext;
+import com.swirlds.common.crypto.Cryptography;
+import com.swirlds.common.io.filesystem.FileSystemManager;
+import com.swirlds.common.io.utility.RecycleBin;
+import com.swirlds.common.merkle.crypto.MerkleCryptography;
+import com.swirlds.common.metrics.platform.DefaultMetricsProvider;
import com.swirlds.common.platform.NodeId;
+import com.swirlds.config.api.Configuration;
+import com.swirlds.metrics.api.Metrics;
+import com.swirlds.platform.builder.PlatformBuilder;
import com.swirlds.platform.config.legacy.ConfigurationException;
import com.swirlds.platform.config.legacy.LegacyConfigProperties;
import com.swirlds.platform.config.legacy.LegacyConfigPropertiesLoader;
import com.swirlds.platform.state.MerkleStateRoot;
+import com.swirlds.platform.state.signed.ReservedSignedState;
+import com.swirlds.platform.state.signed.SignedState;
+import com.swirlds.platform.system.Platform;
import com.swirlds.platform.system.SystemExitUtils;
+import com.swirlds.platform.system.address.AddressBook;
import com.swirlds.platform.util.BootstrapUtils;
import java.util.ArrayList;
import java.util.List;
-import org.junit.jupiter.api.Assertions;
+import java.util.function.BiFunction;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
@@ -47,9 +62,48 @@ final class ServicesMainTest {
private static final MockedStatic<LegacyConfigPropertiesLoader> legacyConfigPropertiesLoaderMockedStatic =
mockStatic(LegacyConfigPropertiesLoader.class);
private static final MockedStatic<BootstrapUtils> bootstrapUtilsMockedStatic = mockStatic(BootstrapUtils.class);
- @Mock
+ @Mock(strictness = LENIENT)
private LegacyConfigProperties legacyConfigProperties;
+ @Mock(strictness = LENIENT)
+ private AddressBook addressBook;
+
+ @Mock(strictness = LENIENT)
+ private DefaultMetricsProvider metricsProvider;
+
+ @Mock(strictness = LENIENT)
+ private Metrics metrics;
+
+ @Mock(strictness = LENIENT)
+ private FileSystemManager fileSystemManager;
+
+ @Mock(strictness = LENIENT)
+ private RecycleBin recycleBin;
+
+ @Mock(strictness = LENIENT)
+ private MerkleCryptography merkleCryptography;
+
+ @Mock(strictness = LENIENT)
+ BiFunction<Configuration, Cryptography, MerkleCryptography> merkleCryptographyFn;
+
+ @Mock(strictness = LENIENT)
+ private PlatformContext platformContext;
+
+ @Mock(strictness = LENIENT)
+ private PlatformBuilder platformBuilder;
+
+ @Mock(strictness = LENIENT)
+ private ReservedSignedState reservedSignedState;
+
+ @Mock(strictness = LENIENT)
+ private SignedState signedState;
+
+ @Mock(strictness = LENIENT)
+ private Platform platform;
+
+ @Mock(strictness = LENIENT)
+ private Hedera hedera;
+
private final ServicesMain subject = new ServicesMain();
// no local nodes specified but more than one match in address book
@@ -102,7 +156,7 @@ void returnsSerializableVersion() {
@Test
void noopsAsExpected() {
// expect:
- Assertions.assertDoesNotThrow(subject::run);
+ assertDoesNotThrow(subject::run);
}
@Test
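The move to @Mock(strictness = LENIENT) matters because MockitoExtension defaults to strict stubbing, where any stubbing a particular test never consumes fails the run with UnnecessaryStubbingException; lenient mocks tolerate shared fixture stubbings that only some tests use, which fits the many platform-construction mocks added above. A minimal contrast (generic Mockito usage, not part of this test class):

    import static org.mockito.Mock.Strictness.LENIENT;

    import com.swirlds.platform.system.address.AddressBook;
    import org.mockito.Mock;

    @Mock // strict by default under MockitoExtension: unused stubbings fail the run
    private AddressBook strictAddressBook;

    @Mock(strictness = LENIENT) // unused stubbings are tolerated
    private AddressBook lenientAddressBook;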
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/BlockStreamServiceTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/BlockStreamServiceTest.java
index 28e6cc7d06e0..d164fc526ab2 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/BlockStreamServiceTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/BlockStreamServiceTest.java
@@ -16,83 +16,62 @@
package com.hedera.node.app.blocks;
-import static com.hedera.node.app.blocks.schemas.V0540BlockStreamSchema.BLOCK_STREAM_INFO_KEY;
+import static com.hedera.node.app.fixtures.AppTestBase.DEFAULT_CONFIG;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mock.Strictness.LENIENT;
+import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.verifyNoInteractions;
-import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
import com.hedera.node.app.blocks.schemas.V0540BlockStreamSchema;
import com.hedera.node.config.testfixtures.HederaTestConfigBuilder;
-import com.swirlds.config.api.Configuration;
-import com.swirlds.state.spi.MigrationContext;
-import com.swirlds.state.spi.Schema;
import com.swirlds.state.spi.SchemaRegistry;
-import com.swirlds.state.spi.StateDefinition;
-import com.swirlds.state.spi.WritableSingletonState;
-import com.swirlds.state.spi.WritableStates;
-import java.util.Set;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
-@SuppressWarnings({"rawtypes", "unchecked"})
@ExtendWith(MockitoExtension.class)
final class BlockStreamServiceTest {
- @Mock(strictness = LENIENT)
+ @Mock
private SchemaRegistry schemaRegistry;
- @Mock(strictness = LENIENT)
- private MigrationContext migrationContext;
+ private BlockStreamService subject;
- @Mock(strictness = LENIENT)
- private WritableSingletonState<BlockStreamInfo> blockStreamState;
-
- @Mock(strictness = LENIENT)
- private WritableStates writableStates;
+ @Test
+ void serviceNameAsExpected() {
+ givenDisabledSubject();
- public static final Configuration DEFAULT_CONFIG = HederaTestConfigBuilder.createConfig();
+ assertThat(subject.getServiceName()).isEqualTo("BlockStreamService");
+ }
@Test
- void testGetServiceName() {
- BlockStreamService blockRecordService = new BlockStreamService(DEFAULT_CONFIG);
- assertEquals(BlockStreamService.NAME, blockRecordService.getServiceName());
+ void enabledSubjectRegistersV0540Schema() {
+ givenEnabledSubject();
+
+ subject.registerSchemas(schemaRegistry);
+
+ verify(schemaRegistry).register(argThat(s -> s instanceof V0540BlockStreamSchema));
}
@Test
- void testRegisterSchemas() {
- when(schemaRegistry.register(any())).then(invocation -> {
- Object[] args = invocation.getArguments();
- assertEquals(1, args.length);
- Schema schema = (Schema) args[0];
- assertThat(schema).isInstanceOf(V0540BlockStreamSchema.class);
- Set<StateDefinition> states = schema.statesToCreate(DEFAULT_CONFIG);
- assertEquals(1, states.size());
- assertTrue(states.contains(StateDefinition.singleton(BLOCK_STREAM_INFO_KEY, BlockStreamInfo.PROTOBUF)));
-
- when(migrationContext.newStates()).thenReturn(writableStates);
- when(migrationContext.previousVersion()).thenReturn(null);
- when(writableStates.getSingleton(BLOCK_STREAM_INFO_KEY)).thenReturn(blockStreamState);
-
- // FINISH:
- ArgumentCaptor<BlockStreamInfo> blockInfoCapture = ArgumentCaptor.forClass(BlockStreamInfo.class);
-
- schema.migrate(migrationContext);
-
- verify(blockStreamState).put(blockInfoCapture.capture());
- assertEquals(BlockStreamInfo.DEFAULT, blockInfoCapture.getValue());
- return null;
- });
+ void disabledSubjectDoesNotRegisterSchema() {
+ givenDisabledSubject();
+
+ subject.registerSchemas(schemaRegistry);
+
+ verifyNoInteractions(schemaRegistry);
+
+ assertThat(subject.migratedLastBlockHash()).isEmpty();
+ }
+
+ private void givenEnabledSubject() {
final var testConfig = HederaTestConfigBuilder.create()
.withValue("blockStream.streamMode", "BOTH")
.getOrCreateConfig();
- BlockStreamService blockStreamService = new BlockStreamService(testConfig);
- blockStreamService.registerSchemas(schemaRegistry);
+ subject = new BlockStreamService(testConfig);
+ }
+
+ private void givenDisabledSubject() {
+ subject = new BlockStreamService(DEFAULT_CONFIG);
}
}
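The rewritten test drives both behaviors purely through configuration, since BlockStreamService only registers its schema when block streams are switched on. A sketch of the two constructions the helpers perform (config key as used in the test; DEFAULT_CONFIG is assumed to leave block streams disabled):

    // Enabled: registers V0540BlockStreamSchema with the schema registry.
    final var enabled = new BlockStreamService(HederaTestConfigBuilder.create()
            .withValue("blockStream.streamMode", "BOTH")
            .getOrCreateConfig());

    // Disabled: registers nothing and reports no migrated last block hash.
    final var disabled = new BlockStreamService(DEFAULT_CONFIG);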
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java
new file mode 100644
index 000000000000..723c40667b4f
--- /dev/null
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.blocks.impl;
+
+import static com.hedera.hapi.util.HapiUtils.asTimestamp;
+import static com.hedera.node.app.blocks.BlockStreamManager.ZERO_BLOCK_HASH;
+import static com.hedera.node.app.blocks.BlockStreamService.FAKE_RESTART_BLOCK_HASH;
+import static com.hedera.node.app.blocks.impl.BlockImplUtils.appendHash;
+import static com.hedera.node.app.blocks.impl.BlockImplUtils.combine;
+import static com.hedera.node.app.blocks.schemas.V0540BlockStreamSchema.BLOCK_STREAM_INFO_KEY;
+import static com.hedera.node.app.fixtures.AppTestBase.DEFAULT_CONFIG;
+import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf;
+import static com.swirlds.platform.state.service.schemas.V0540PlatformStateSchema.PLATFORM_STATE_KEY;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.withSettings;
+
+import com.hedera.hapi.block.stream.BlockItem;
+import com.hedera.hapi.block.stream.RecordFileItem;
+import com.hedera.hapi.block.stream.output.StateChanges;
+import com.hedera.hapi.block.stream.output.TransactionResult;
+import com.hedera.hapi.node.base.SemanticVersion;
+import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
+import com.hedera.hapi.platform.event.EventTransaction;
+import com.hedera.hapi.platform.state.PlatformState;
+import com.hedera.node.app.blocks.BlockItemWriter;
+import com.hedera.node.app.blocks.BlockStreamService;
+import com.hedera.node.app.tss.TssBaseService;
+import com.hedera.node.config.ConfigProvider;
+import com.hedera.node.config.VersionedConfigImpl;
+import com.hedera.node.config.testfixtures.HederaTestConfigBuilder;
+import com.hedera.pbj.runtime.ParseException;
+import com.hedera.pbj.runtime.io.buffer.Bytes;
+import com.swirlds.platform.state.service.PlatformStateService;
+import com.swirlds.platform.system.Round;
+import com.swirlds.state.State;
+import com.swirlds.state.spi.CommittableWritableStates;
+import com.swirlds.state.spi.ReadableStates;
+import com.swirlds.state.spi.WritableSingletonStateBase;
+import com.swirlds.state.spi.WritableStates;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.time.Instant;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+class BlockStreamManagerImplTest {
+ private static final SemanticVersion CREATION_VERSION = new SemanticVersion(1, 2, 3, "alpha.1", "2");
+ private static final long ROUND_NO = 123L;
+ private static final long N_MINUS_2_BLOCK_NO = 664L;
+ private static final long N_MINUS_1_BLOCK_NO = 665L;
+ private static final long N_BLOCK_NO = 666L;
+ private static final Instant CONSENSUS_NOW = Instant.ofEpochSecond(1_234_567L);
+ private static final Bytes N_MINUS_2_BLOCK_HASH = Bytes.wrap(noThrowSha384HashOf(new byte[] {(byte) 0xAA}));
+ private static final Bytes FIRST_FAKE_SIGNATURE = Bytes.fromHex("ff".repeat(48));
+ private static final Bytes SECOND_FAKE_SIGNATURE = Bytes.fromHex("ee".repeat(48));
+ private static final BlockItem FAKE_EVENT_TRANSACTION =
+ BlockItem.newBuilder().eventTransaction(EventTransaction.DEFAULT).build();
+ private static final BlockItem FAKE_TRANSACTION_RESULT =
+ BlockItem.newBuilder().transactionResult(TransactionResult.DEFAULT).build();
+ private static final Bytes FAKE_RESULT_HASH = noThrowSha384HashOfItem(FAKE_TRANSACTION_RESULT);
+ private static final BlockItem FAKE_STATE_CHANGES =
+ BlockItem.newBuilder().stateChanges(StateChanges.DEFAULT).build();
+ private static final BlockItem FAKE_RECORD_FILE_ITEM =
+ BlockItem.newBuilder().recordFile(RecordFileItem.DEFAULT).build();
+
+ @Mock
+ private TssBaseService tssBaseService;
+
+ @Mock
+ private ConfigProvider configProvider;
+
+ @Mock
+ private BoundaryStateChangeListener boundaryStateChangeListener;
+
+ @Mock
+ private BlockItemWriter aWriter;
+
+ @Mock
+ private BlockItemWriter bWriter;
+
+ @Mock
+ private ReadableStates readableStates;
+
+ private WritableStates writableStates;
+
+ @Mock
+ private Round round;
+
+ @Mock
+ private State state;
+
+ private final AtomicReference<Bytes> lastAItem = new AtomicReference<>();
+ private final AtomicReference<Bytes> lastBItem = new AtomicReference<>();
+ private final AtomicReference<PlatformState> stateRef = new AtomicReference<>();
+ private final AtomicReference<BlockStreamInfo> infoRef = new AtomicReference<>();
+
+ private WritableSingletonStateBase<BlockStreamInfo> blockStreamInfoState;
+
+ private BlockStreamManagerImpl subject;
+
+ @BeforeEach
+ void setUp() {
+ writableStates = mock(WritableStates.class, withSettings().extraInterfaces(CommittableWritableStates.class));
+ }
+
+ @Test
+ void requiresLastHashToBeInitialized() {
+ given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(DEFAULT_CONFIG, 1));
+ subject = new BlockStreamManagerImpl(
+ () -> aWriter, ForkJoinPool.commonPool(), configProvider, tssBaseService, boundaryStateChangeListener);
+ assertThrows(IllegalStateException.class, () -> subject.startRound(round, state));
+ }
+
+ @Test
+ void startsAndEndsBlockWithSingleRoundPerBlockAsExpected() throws ParseException {
+ givenSubjectWith(
+ 1,
+ blockStreamInfoWith(N_MINUS_1_BLOCK_NO, N_MINUS_2_BLOCK_HASH, Bytes.EMPTY),
+ platformStateWith(null),
+ aWriter);
+ givenEndOfRoundSetup();
+ final ArgumentCaptor<byte[]> blockHashCaptor = ArgumentCaptor.forClass(byte[].class);
+
+ // Initialize the last (N-1) block hash
+ subject.initLastBlockHash(FAKE_RESTART_BLOCK_HASH);
+
+ // Start the round that will be block N
+ subject.startRound(round, state);
+
+ // Assert the internal state of the subject has changed as expected and the writer has been opened
+ verify(boundaryStateChangeListener).setLastUsedConsensusTime(CONSENSUS_NOW);
+ verify(aWriter).openBlock(N_BLOCK_NO);
+ assertEquals(N_MINUS_2_BLOCK_HASH, subject.blockHashByBlockNumber(N_MINUS_2_BLOCK_NO));
+ assertEquals(FAKE_RESTART_BLOCK_HASH, subject.blockHashByBlockNumber(N_MINUS_1_BLOCK_NO));
+ assertNull(subject.prngSeed());
+ assertEquals(N_BLOCK_NO, subject.blockNo());
+
+ // Write some items to the block
+ subject.writeItem(FAKE_EVENT_TRANSACTION);
+ subject.writeItem(FAKE_TRANSACTION_RESULT);
+ subject.writeItem(FAKE_STATE_CHANGES);
+ subject.writeItem(FAKE_RECORD_FILE_ITEM);
+
+ // End the round
+ subject.endRound(state, ROUND_NO);
+
+ // Assert the internal state of the subject has changed as expected and the writer has been closed
+ final var expectedBlockInfo = new BlockStreamInfo(
+ N_BLOCK_NO,
+ asTimestamp(CONSENSUS_NOW),
+ appendHash(combine(ZERO_BLOCK_HASH, FAKE_RESULT_HASH), appendHash(ZERO_BLOCK_HASH, Bytes.EMPTY, 4), 4),
+ appendHash(FAKE_RESTART_BLOCK_HASH, appendHash(N_MINUS_2_BLOCK_HASH, Bytes.EMPTY, 256), 256));
+ final var actualBlockInfo = infoRef.get();
+ assertEquals(expectedBlockInfo, actualBlockInfo);
+ verify(tssBaseService).requestLedgerSignature(blockHashCaptor.capture());
+
+ // Provide the ledger signature to the subject
+ subject.accept(blockHashCaptor.getValue(), FIRST_FAKE_SIGNATURE.toByteArray());
+
+ // Assert the block proof was written
+ final var proofItem = lastAItem.get();
+ assertNotNull(proofItem);
+ final var item = BlockItem.PROTOBUF.parse(proofItem);
+ assertTrue(item.hasBlockProof());
+ final var proof = item.blockProofOrThrow();
+ assertEquals(N_BLOCK_NO, proof.block());
+ assertEquals(FIRST_FAKE_SIGNATURE, proof.blockSignature());
+ }
+
+ @Test
+ void doesNotEndBlockWithMultipleRoundPerBlockIfNotModZero() {
+ givenSubjectWith(
+ 7,
+ blockStreamInfoWith(N_MINUS_1_BLOCK_NO, N_MINUS_2_BLOCK_HASH, Bytes.EMPTY),
+ platformStateWith(null),
+ aWriter);
+
+ // Initialize the last (N-1) block hash
+ subject.initLastBlockHash(FAKE_RESTART_BLOCK_HASH);
+
+ // Start the round that will be block N
+ subject.startRound(round, state);
+
+ // Assert the internal state of the subject has changed as expected and the writer has been opened
+ verify(boundaryStateChangeListener).setLastUsedConsensusTime(CONSENSUS_NOW);
+ verify(aWriter).openBlock(N_BLOCK_NO);
+ assertEquals(N_MINUS_2_BLOCK_HASH, subject.blockHashByBlockNumber(N_MINUS_2_BLOCK_NO));
+ assertEquals(FAKE_RESTART_BLOCK_HASH, subject.blockHashByBlockNumber(N_MINUS_1_BLOCK_NO));
+
+ // Write some items to the block
+ subject.writeItem(FAKE_EVENT_TRANSACTION);
+ subject.writeItem(FAKE_TRANSACTION_RESULT);
+ subject.writeItem(FAKE_STATE_CHANGES);
+ subject.writeItem(FAKE_RECORD_FILE_ITEM);
+
+ // End the round
+ subject.endRound(state, ROUND_NO);
+
+ // Assert the block stayed open: the subject requested no ledger signature
+ verify(tssBaseService, never()).requestLedgerSignature(any());
+ }
+
+ @Test
+ void alwaysEndsBlockOnFreezeRoundPerBlockAsExpected() throws ParseException {
+ final var resultHashes = Bytes.fromHex("aa".repeat(48) + "bb".repeat(48) + "cc".repeat(48) + "dd".repeat(48));
+ givenSubjectWith(
+ 7,
+ blockStreamInfoWith(N_MINUS_1_BLOCK_NO, N_MINUS_2_BLOCK_HASH, resultHashes),
+ platformStateWith(CONSENSUS_NOW.minusSeconds(1)),
+ aWriter);
+ givenEndOfRoundSetup();
+ given(round.getRoundNum()).willReturn(ROUND_NO);
+ final ArgumentCaptor<byte[]> blockHashCaptor = ArgumentCaptor.forClass(byte[].class);
+
+ // Initialize the last (N-1) block hash
+ subject.initLastBlockHash(FAKE_RESTART_BLOCK_HASH);
+
+ // Start the round that will be block N
+ subject.startRound(round, state);
+
+ // Assert the internal state of the subject has changed as expected and the writer has been opened
+ verify(boundaryStateChangeListener).setLastUsedConsensusTime(CONSENSUS_NOW);
+ verify(aWriter).openBlock(N_BLOCK_NO);
+ assertEquals(N_MINUS_2_BLOCK_HASH, subject.blockHashByBlockNumber(N_MINUS_2_BLOCK_NO));
+ assertEquals(FAKE_RESTART_BLOCK_HASH, subject.blockHashByBlockNumber(N_MINUS_1_BLOCK_NO));
+ assertEquals(N_BLOCK_NO, subject.blockNo());
+
+ // Write some items to the block
+ subject.writeItem(FAKE_EVENT_TRANSACTION);
+ assertEquals(Bytes.fromHex("aa".repeat(48)), subject.prngSeed());
+ subject.writeItem(FAKE_TRANSACTION_RESULT);
+ assertEquals(Bytes.fromHex("bb".repeat(48)), subject.prngSeed());
+ subject.writeItem(FAKE_STATE_CHANGES);
+ for (int i = 0; i < 8; i++) {
+ subject.writeItem(FAKE_RECORD_FILE_ITEM);
+ }
+
+ // End the round
+ subject.endRound(state, ROUND_NO);
+
+ // Assert the internal state of the subject has changed as expected and the writer has been closed
+ final var expectedBlockInfo = new BlockStreamInfo(
+ N_BLOCK_NO,
+ asTimestamp(CONSENSUS_NOW),
+ appendHash(combine(Bytes.fromHex("dd".repeat(48)), FAKE_RESULT_HASH), resultHashes, 4),
+ appendHash(FAKE_RESTART_BLOCK_HASH, appendHash(N_MINUS_2_BLOCK_HASH, Bytes.EMPTY, 256), 256));
+ final var actualBlockInfo = infoRef.get();
+ assertEquals(expectedBlockInfo, actualBlockInfo);
+ verify(tssBaseService).requestLedgerSignature(blockHashCaptor.capture());
+
+ // Provide the ledger signature to the subject
+ subject.accept(blockHashCaptor.getValue(), FIRST_FAKE_SIGNATURE.toByteArray());
+
+ // Assert the block proof was written
+ final var proofItem = lastAItem.get();
+ assertNotNull(proofItem);
+ final var item = BlockItem.PROTOBUF.parse(proofItem);
+ assertTrue(item.hasBlockProof());
+ final var proof = item.blockProofOrThrow();
+ assertEquals(N_BLOCK_NO, proof.block());
+ assertEquals(FIRST_FAKE_SIGNATURE, proof.blockSignature());
+ }
+
+ @Test
+ void supportsMultiplePendingBlocksWithIndirectProofAsExpected() throws ParseException {
+ givenSubjectWith(
+ 1,
+ blockStreamInfoWith(N_MINUS_1_BLOCK_NO, N_MINUS_2_BLOCK_HASH, Bytes.EMPTY),
+ platformStateWith(null),
+ aWriter,
+ bWriter);
+ givenEndOfRoundSetup();
+ doAnswer(invocationOnMock -> {
+ lastBItem.set(invocationOnMock.getArgument(0));
+ return bWriter;
+ })
+ .when(bWriter)
+ .writeItem(any());
+ final ArgumentCaptor<byte[]> blockHashCaptor = ArgumentCaptor.forClass(byte[].class);
+
+ // Initialize the last (N-1) block hash
+ subject.initLastBlockHash(FAKE_RESTART_BLOCK_HASH);
+
+ // Start the round that will be block N
+ subject.startRound(round, state);
+ // Write some items to the block
+ subject.writeItem(FAKE_EVENT_TRANSACTION);
+ subject.writeItem(FAKE_TRANSACTION_RESULT);
+ subject.writeItem(FAKE_STATE_CHANGES);
+ subject.writeItem(FAKE_RECORD_FILE_ITEM);
+ // End the round in block N
+ subject.endRound(state, ROUND_NO);
+
+ // Start the round that will be block N+1
+ subject.startRound(round, state);
+ // Write some items to the block
+ subject.writeItem(FAKE_EVENT_TRANSACTION);
+ subject.writeItem(FAKE_TRANSACTION_RESULT);
+ subject.writeItem(FAKE_STATE_CHANGES);
+ subject.writeItem(FAKE_RECORD_FILE_ITEM);
+ // End the round in block N+1
+ subject.endRound(state, ROUND_NO + 1);
+
+ verify(tssBaseService, times(2)).requestLedgerSignature(blockHashCaptor.capture());
+ final var allBlockHashes = blockHashCaptor.getAllValues();
+ assertEquals(2, allBlockHashes.size());
+
+ // Provide the N+1 ledger signature to the subject first
+ subject.accept(allBlockHashes.getLast(), FIRST_FAKE_SIGNATURE.toByteArray());
+ subject.accept(allBlockHashes.getFirst(), SECOND_FAKE_SIGNATURE.toByteArray());
+
+ // Assert both block proofs were written, but with the proof for N using an indirect proof
+ final var aProofItem = lastAItem.get();
+ assertNotNull(aProofItem);
+ final var aItem = BlockItem.PROTOBUF.parse(aProofItem);
+ assertTrue(aItem.hasBlockProof());
+ final var aProof = aItem.blockProofOrThrow();
+ assertEquals(N_BLOCK_NO, aProof.block());
+ assertEquals(FIRST_FAKE_SIGNATURE, aProof.blockSignature());
+ assertEquals(2, aProof.siblingHashes().size());
+ // And the proof for N+1 using a direct proof
+ final var bProofItem = lastBItem.get();
+ assertNotNull(bProofItem);
+ final var bItem = BlockItem.PROTOBUF.parse(bProofItem);
+ assertTrue(bItem.hasBlockProof());
+ final var bProof = bItem.blockProofOrThrow();
+ assertEquals(N_BLOCK_NO + 1, bProof.block());
+ assertEquals(FIRST_FAKE_SIGNATURE, bProof.blockSignature());
+ assertTrue(bProof.siblingHashes().isEmpty());
+ }
+
+ private void givenSubjectWith(
+ final int roundsPerBlock,
+ @NonNull final BlockStreamInfo blockStreamInfo,
+ @NonNull final PlatformState platformState,
+ @NonNull final BlockItemWriter... writers) {
+ given(round.getConsensusTimestamp()).willReturn(CONSENSUS_NOW);
+ final AtomicInteger nextWriter = new AtomicInteger(0);
+ final var config = HederaTestConfigBuilder.create()
+ .withValue("blockStream.roundsPerBlock", roundsPerBlock)
+ .getOrCreateConfig();
+ given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1L));
+ subject = new BlockStreamManagerImpl(
+ () -> writers[nextWriter.getAndIncrement()],
+ ForkJoinPool.commonPool(),
+ configProvider,
+ tssBaseService,
+ boundaryStateChangeListener);
+ subject.appendRealHashes();
+ given(state.getReadableStates(BlockStreamService.NAME)).willReturn(readableStates);
+ given(state.getReadableStates(PlatformStateService.NAME)).willReturn(readableStates);
+ infoRef.set(blockStreamInfo);
+ stateRef.set(platformState);
+ blockStreamInfoState = new WritableSingletonStateBase<>(BLOCK_STREAM_INFO_KEY, infoRef::get, infoRef::set);
+ given(readableStates.getSingleton(BLOCK_STREAM_INFO_KEY))
+ .willReturn(blockStreamInfoState);
+ given(readableStates.getSingleton(PLATFORM_STATE_KEY))
+ .willReturn(new WritableSingletonStateBase<>(PLATFORM_STATE_KEY, stateRef::get, stateRef::set));
+ }
+
+ private void givenEndOfRoundSetup() {
+ given(boundaryStateChangeListener.flushChanges()).willReturn(FAKE_STATE_CHANGES);
+ doAnswer(invocationOnMock -> {
+ lastAItem.set(invocationOnMock.getArgument(0));
+ return aWriter;
+ })
+ .when(aWriter)
+ .writeItem(any());
+ given(state.getWritableStates(BlockStreamService.NAME)).willReturn(writableStates);
+ given(writableStates.getSingleton(BLOCK_STREAM_INFO_KEY))
+ .willReturn(blockStreamInfoState);
+ doAnswer(invocationOnMock -> {
+ blockStreamInfoState.commit();
+ return null;
+ })
+ .when((CommittableWritableStates) writableStates)
+ .commit();
+ }
+
+ private BlockStreamInfo blockStreamInfoWith(
+ final long blockNumber, @NonNull final Bytes nMinus2Hash, @NonNull final Bytes resultHashes) {
+ return BlockStreamInfo.newBuilder()
+ .blockNumber(blockNumber)
+ .trailingBlockHashes(appendHash(nMinus2Hash, Bytes.EMPTY, 256))
+ .trailingOutputHashes(resultHashes)
+ .build();
+ }
+
+ private PlatformState platformStateWith(@Nullable final Instant freezeTime) {
+ return PlatformState.newBuilder()
+ .creationSoftwareVersion(CREATION_VERSION)
+ .freezeTime(freezeTime == null ? null : asTimestamp(freezeTime))
+ .build();
+ }
+
+ private static Bytes noThrowSha384HashOfItem(@NonNull final BlockItem item) {
+ return Bytes.wrap(noThrowSha384HashOf(BlockItem.PROTOBUF.toBytes(item).toByteArray()));
+ }
+}
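The expected-info assertions above lean on two helpers from BlockImplUtils: combine, which folds two hashes into one, and appendHash, which pushes a hash into a fixed-width trailing window (4 entries for output hashes, 256 for block hashes). One common way to implement such a combine is to hash the concatenation of the inputs; the sketch below assumes that scheme and is not necessarily the production implementation:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Sketch: fold two SHA-384 hashes into one by hashing their concatenation.
    static byte[] combine(final byte[] leftHash, final byte[] rightHash) {
        try {
            final MessageDigest digest = MessageDigest.getInstance("SHA-384");
            digest.update(leftHash);
            digest.update(rightHash);
            return digest.digest();
        } catch (final NoSuchAlgorithmException e) {
            throw new IllegalStateException("SHA-384 should be available on any standard JVM", e);
        }
    }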
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchemaTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchemaTest.java
index 7371a94270f3..2679d1b72ed9 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchemaTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0540BlockStreamSchemaTest.java
@@ -17,75 +17,121 @@
package com.hedera.node.app.blocks.schemas;
import static com.hedera.node.app.blocks.schemas.V0540BlockStreamSchema.BLOCK_STREAM_INFO_KEY;
+import static com.hedera.node.app.fixtures.AppTestBase.DEFAULT_CONFIG;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mock.Strictness.LENIENT;
+import static org.mockito.BDDMockito.given;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.verifyNoInteractions;
+import com.hedera.hapi.node.base.SemanticVersion;
+import com.hedera.hapi.node.base.Timestamp;
+import com.hedera.hapi.node.state.blockrecords.BlockInfo;
+import com.hedera.hapi.node.state.blockrecords.RunningHashes;
import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
-import com.hedera.node.config.testfixtures.HederaTestConfigBuilder;
-import com.swirlds.config.api.Configuration;
+import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.swirlds.state.spi.MigrationContext;
-import com.swirlds.state.spi.StateDefinition;
import com.swirlds.state.spi.WritableSingletonState;
import com.swirlds.state.spi.WritableStates;
-import java.util.Set;
+import java.util.Map;
+import java.util.function.Consumer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class V0540BlockStreamSchemaTest {
- @Mock(strictness = LENIENT)
- private MigrationContext mockCtx;
+ @Mock
+ private MigrationContext migrationContext;
- @Mock(strictness = LENIENT)
- private WritableSingletonState<BlockStreamInfo> blockStreamInfo;
* Registers expected sidecars.
*
* When a client has registered all its expectations with a {@link SidecarWatcher}
@@ -71,7 +71,7 @@ public class SidecarWatcher {
private record ConstructionDetails(String creatingThread, String stackTrace) {}
public SidecarWatcher(@NonNull final Path path) {
- this.unsubscribe = RECORD_STREAM_ACCESS.subscribe(guaranteedExtantDir(path), new StreamDataListener() {
+ this.unsubscribe = STREAM_FILE_ACCESS.subscribe(guaranteedExtantDir(path), new StreamDataListener() {
@Override
public void onNewSidecar(@NonNull final TransactionSidecarRecord sidecar) {
actualSidecars.add(sidecar);
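The assignment above implies that subscribe(...) hands back an unsubscription token, which the watcher stores in its unsubscribe field and runs once it is done observing the stream. A sketch of that pattern (assuming the token is a plain Runnable, as the field assignment suggests):

    // Sketch: keep the token returned by subscribe(...) and run it to stop
    // receiving onNewSidecar callbacks once validation is complete.
    private final Runnable unsubscribe;

    public void stopWatching() {
        unsubscribe.run();
    }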
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/compose/PerpetualLocalCalls.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/compose/PerpetualLocalCalls.java
deleted file mode 100644
index f559d6dbd46e..000000000000
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/compose/PerpetualLocalCalls.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2020-2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.services.bdd.suites.compose;
-
-import static com.hedera.services.bdd.spec.HapiSpec.defaultHapiSpec;
-import static com.hedera.services.bdd.spec.assertions.ContractFnResultAsserts.isLiteralResult;
-import static com.hedera.services.bdd.spec.assertions.ContractFnResultAsserts.resultWith;
-import static com.hedera.services.bdd.spec.queries.QueryVerbs.contractCallLocal;
-import static com.hedera.services.bdd.spec.transactions.TxnVerbs.contractCreate;
-import static com.hedera.services.bdd.spec.transactions.TxnVerbs.uploadInitCode;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.runWithProvider;
-import static com.hedera.services.bdd.suites.contract.Utils.FunctionType.FUNCTION;
-import static com.hedera.services.bdd.suites.contract.Utils.getABIFor;
-import static java.util.concurrent.TimeUnit.MINUTES;
-
-import com.hedera.services.bdd.spec.HapiSpec;
-import com.hedera.services.bdd.spec.HapiSpecOperation;
-import com.hedera.services.bdd.spec.SpecOperation;
-import com.hedera.services.bdd.spec.infrastructure.OpProvider;
-import com.hedera.services.bdd.suites.HapiSuite;
-import java.math.BigInteger;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-import java.util.stream.Stream;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.junit.jupiter.api.DynamicTest;
-
-public class PerpetualLocalCalls extends HapiSuite {
-
- private static final Logger log = LogManager.getLogger(PerpetualLocalCalls.class);
- public static final String CHILD_STORAGE = "ChildStorage";
-
- private AtomicLong duration = new AtomicLong(Long.MAX_VALUE);
- private AtomicReference<TimeUnit> unit = new AtomicReference<>(MINUTES);
- private AtomicInteger maxOpsPerSec = new AtomicInteger(100);
- private AtomicInteger totalBeforeFailure = new AtomicInteger(0);
-
- public static void main(String... args) {
- new PerpetualLocalCalls().runSuiteSync();
- }
-
- @Override
- public List<Stream<DynamicTest>> getSpecsInSuite() {
- return List.of(localCallsForever());
- }
-
- final Stream<DynamicTest> localCallsForever() {
- return defaultHapiSpec("LocalCallsForever")
- .given()
- .when()
- .then(runWithProvider(localCallsFactory())
- .lasting(duration::get, unit::get)
- .maxOpsPerSec(maxOpsPerSec::get));
- }
-
- private Function<HapiSpec, OpProvider> localCallsFactory() {
- return spec -> new OpProvider() {
- @Override
- public List<SpecOperation> suggestedInitializers() {
- return List.of(uploadInitCode(CHILD_STORAGE), contractCreate(CHILD_STORAGE));
- }
-
- @Override
- public Optional<HapiSpecOperation> get() {
- var op = contractCallLocal(CHILD_STORAGE, "getMyValue")
- .noLogging()
- .has(resultWith()
- .resultThruAbi(
- getABIFor(FUNCTION, "getMyValue", CHILD_STORAGE),
- isLiteralResult(new Object[] {BigInteger.valueOf(73)})));
- var soFar = totalBeforeFailure.getAndIncrement();
- if (soFar % 1000 == 0) {
- log.info("--- {}", soFar);
- }
- return Optional.of(op);
- }
- };
- }
-
- @Override
- protected Logger getResultsLogger() {
- return log;
- }
-}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractCallSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractCallSuite.java
index 9b93a3a48dad..5a691d768d2d 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractCallSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractCallSuite.java
@@ -17,6 +17,7 @@
package com.hedera.services.bdd.suites.contract.hapi;
import static com.hedera.node.app.hapi.utils.EthSigsUtils.recoverAddressFromPubKey;
+import static com.hedera.services.bdd.junit.TestTags.ADHOC;
import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT;
import static com.hedera.services.bdd.spec.HapiPropertySource.asContract;
import static com.hedera.services.bdd.spec.HapiPropertySource.asContractString;
@@ -71,9 +72,9 @@
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.logIt;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyListNamed;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyNamed;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.recordStreamMustIncludeNoFailuresFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sidecarIdValidator;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.streamMustIncludeNoFailuresFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.submitModified;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
import static com.hedera.services.bdd.spec.utilops.mod.ModificationUtils.withSuccessivelyVariedBodyIds;
@@ -164,8 +165,7 @@
import org.junit.jupiter.api.Tag;
@Tag(SMART_CONTRACT)
-// @Tag(ADHOC)
-@Tag("ONEOFF")
+@Tag(ADHOC)
public class ContractCallSuite {
public static final String TOKEN = "yahcliToken";
@@ -245,7 +245,7 @@ final Stream<DynamicTest> repeatedCreate2FailsWithInterpretableActionSidecars()
final var secondCreation = "secondCreation";
return defaultHapiSpec("repeatedCreate2FailsWithInterpretableActionSidecars", NONDETERMINISTIC_TRANSACTION_FEES)
.given(
- streamMustIncludeNoFailuresFrom(sidecarIdValidator()),
+ recordStreamMustIncludeNoFailuresFrom(sidecarIdValidator()),
cryptoCreate(ACCOUNT).balance(ONE_MILLION_HBARS),
uploadInitCode(contract),
contractCreate(contract))
@@ -275,7 +275,7 @@ final Stream<DynamicTest> insufficientGasToPrecompileFailsWithInterpretableActio
final var tokenInfoFn = new Function("getTokenInfo(address)");
return defaultHapiSpec("insufficientGasToPrecompileFailsWithInterpretableActionSidecars")
.given(
- streamMustIncludeNoFailuresFrom(sidecarIdValidator()),
+ recordStreamMustIncludeNoFailuresFrom(sidecarIdValidator()),
uploadInitCode(contract),
contractCreate(contract))
.when(tokenCreate("someToken").exposingAddressTo(someTokenAddress::set))
@@ -307,7 +307,7 @@ final Stream<DynamicTest> hollowCreationFailsCleanly() {
final var contract = "HollowAccountCreator";
return defaultHapiSpec("HollowCreationFailsCleanly", FULLY_NONDETERMINISTIC)
.given(
- streamMustIncludeNoFailuresFrom(sidecarIdValidator()),
+ recordStreamMustIncludeNoFailuresFrom(sidecarIdValidator()),
uploadInitCode(contract),
contractCreate(contract))
.when(contractCall(contract, "testCallFoo", randomHeadlongAddress(), BigInteger.valueOf(500_000L))
@@ -1971,7 +1971,7 @@ final Stream<DynamicTest> hscsEvm010ReceiverMustSignContractTx() {
NONDETERMINISTIC_FUNCTION_PARAMETERS,
NONDETERMINISTIC_TRANSACTION_FEES)
.given(
- streamMustIncludeNoFailuresFrom(sidecarIdValidator()),
+ recordStreamMustIncludeNoFailuresFrom(sidecarIdValidator()),
newKeyNamed(RECEIVER_KEY),
cryptoCreate(ACC)
.balance(5 * ONE_HUNDRED_HBARS)
@@ -2569,7 +2569,7 @@ final Stream<DynamicTest> callToNonExtantLongZeroAddressUsesTargetedAddress() {
final var nonExtantMirrorAddress = asHeadlongAddress("0xE8D4A50FFF");
return defaultHapiSpec("callToNonExtantLongZeroAddressUsesTargetedAddress")
.given(
- streamMustIncludeNoFailuresFrom(sidecarIdValidator()),
+ recordStreamMustIncludeNoFailuresFrom(sidecarIdValidator()),
uploadInitCode(contract),
contractCreate(contract))
.when()
@@ -2583,7 +2583,7 @@ final Stream<DynamicTest> callToNonExtantEvmAddressUsesTargetedAddress() {
final var nonExtantEvmAddress = asHeadlongAddress(TxnUtils.randomUtf8Bytes(20));
return defaultHapiSpec("callToNonExtantEvmAddressUsesTargetedAddress")
.given(
- streamMustIncludeNoFailuresFrom(sidecarIdValidator()),
+ recordStreamMustIncludeNoFailuresFrom(sidecarIdValidator()),
uploadInitCode(contract),
contractCreate(contract))
.when()
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractDeleteSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractDeleteSuite.java
index 473849bfbb6b..4566e272fd80 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractDeleteSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/hapi/ContractDeleteSuite.java
@@ -147,13 +147,14 @@ final Stream<DynamicTest> cannotUseMoreThanChildContractLimit() {
asHeadlongAddress(treasuryMirrorAddr.get()),
BigInteger.valueOf(illegalNumChildren))
.via(precompileViolation)
+ .gas(215_000L)
.hasKnownStatus(MAX_CHILD_RECORDS_EXCEEDED),
contractCall(
contract,
"createThingsRepeatedly",
BigInteger.valueOf(illegalNumChildren))
.via(internalCreateViolation)
- .gas(15_000_000)
+ .gas(15_000_000L)
.hasKnownStatus(MAX_CHILD_RECORDS_EXCEEDED));
}))
.then(
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractBurnHTSV2SecurityModelSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractBurnHTSV2SecurityModelSuite.java
index 8466c2425021..56568b8e8ef1 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractBurnHTSV2SecurityModelSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractBurnHTSV2SecurityModelSuite.java
@@ -826,7 +826,7 @@ final Stream<DynamicTest> V2SecurityHscsPreC020RollbackBurnThatFailsAfterAPrecom
@HapiTest
final Stream<DynamicTest> V2SecurityHscsPrec004TokenBurnOfFungibleTokenUnits() {
- final var gasUsed = 14085L;
+ final var gasUsed = 15284L;
final var CREATION_TX = "CREATION_TX";
final var MULTI_KEY = "MULTI_KEY";
@@ -990,7 +990,7 @@ final Stream<DynamicTest> V2SecurityHscsPrec011BurnAfterNestedMint() {
@HapiTest
final Stream<DynamicTest> V2SecurityHscsPrec005TokenBurnOfNft() {
- final var gasUsed = 14085;
+ final var gasUsed = 15284L;
final var CREATION_TX = "CREATION_TX";
return defaultHapiSpec("V2SecurityHscsPrec005TokenBurnOfNft")
.given(
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractKeysStillWorkAsExpectedSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractKeysStillWorkAsExpectedSuite.java
index c38ee70f02aa..cf9beecc0905 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractKeysStillWorkAsExpectedSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/ContractKeysStillWorkAsExpectedSuite.java
@@ -44,9 +44,9 @@
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.childRecordsCheck;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyNamed;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.noOp;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.recordStreamMustIncludePassFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.recordedChildBodyWithId;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.streamMustInclude;
import static com.hedera.services.bdd.spec.utilops.records.SnapshotMatchMode.NONDETERMINISTIC_FUNCTION_PARAMETERS;
import static com.hedera.services.bdd.spec.utilops.records.SnapshotMatchMode.NONDETERMINISTIC_NONCE;
import static com.hedera.services.bdd.suites.HapiSuite.ONE_HBAR;
@@ -85,21 +85,22 @@ final Stream<DynamicTest> approvalFallbacksRequiredWithoutTopLevelSigAccess() {
final AtomicReference bReceiverAddr = new AtomicReference<>();
return hapiTest(
- streamMustInclude(recordedChildBodyWithId(TOKEN_UNIT_FROM_TO_OTHERS_TXN, 1, (spec, txn) -> {
- if (txn.hasNodeStakeUpdate()) {
- // Avoid asserting something about an end-of-staking-period NodeStakeUpdate in CI
- return;
- }
- final var tokenTransfers = txn.getCryptoTransfer().getTokenTransfersList();
- assertEquals(1, tokenTransfers.size());
- final var tokenTransfer = tokenTransfers.getFirst();
- for (final var adjust : tokenTransfer.getTransfersList()) {
- if (adjust.getAmount() < 0) {
- // The debit should have been automatically converted to an approval
- assertTrue(adjust.getIsApproval());
- }
- }
- })),
+ recordStreamMustIncludePassFrom(
+ recordedChildBodyWithId(TOKEN_UNIT_FROM_TO_OTHERS_TXN, 1, (spec, txn) -> {
+ if (txn.hasNodeStakeUpdate()) {
+ // Avoid asserting something about an end-of-staking-period NodeStakeUpdate in CI
+ return;
+ }
+ final var tokenTransfers = txn.getCryptoTransfer().getTokenTransfersList();
+ assertEquals(1, tokenTransfers.size());
+ final var tokenTransfer = tokenTransfers.getFirst();
+ for (final var adjust : tokenTransfer.getTransfersList()) {
+ if (adjust.getAmount() < 0) {
+ // The debit should have been automatically converted to an approval
+ assertTrue(adjust.getIsApproval());
+ }
+ }
+ })),
someWellKnownTokensAndAccounts(
fungibleTokenMirrorAddr,
nonFungibleTokenMirrorAddr,
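The core rule the relocated assertion enforces: once top-level signature access is withdrawn, any debit the contract dispatches must have been converted to an approval-based debit. Extracted as a standalone predicate (a sketch; TokenTransferList and AccountAmount are the generated HAPI protobuf types the lambda above iterates, assumed to live in the usual generated package):

    import com.hederahashgraph.api.proto.java.AccountAmount;
    import com.hederahashgraph.api.proto.java.TokenTransferList;

    // Sketch: every negative (debit) adjustment in the transfer list must
    // carry isApproval = true, or the auto-conversion did not happen.
    static boolean allDebitsAreApprovals(final TokenTransferList tokenTransfer) {
        for (final AccountAmount adjust : tokenTransfer.getTransfersList()) {
            if (adjust.getAmount() < 0 && !adjust.getIsApproval()) {
                return false;
            }
        }
        return true;
    }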
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/RedirectPrecompileSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/RedirectPrecompileSuite.java
index 7762d67937f0..46419a29a5c3 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/RedirectPrecompileSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/RedirectPrecompileSuite.java
@@ -90,7 +90,7 @@ final Stream<DynamicTest> balanceOf() {
.contractCallResult(htsPrecompileResult()
.forFunction(ParsingConstants.FunctionType.ERC_BALANCE)
.withBalance(totalSupply))
- .gasUsed(100L))));
+ .gasUsed(2607L))));
}
@HapiTest
@@ -128,7 +128,7 @@ final Stream<DynamicTest> redirectToInvalidToken() {
CONTRACT_REVERT_EXECUTED,
recordWith()
.status(INVALID_TOKEN_ID)
- .contractCallResult(resultWith().gasUsed(100L))));
+ .contractCallResult(resultWith().gasUsed(2607L))));
}
@HapiTest
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenInfoHTSSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenInfoHTSSuite.java
index 8f3b7dcdf728..6d7b6125254e 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenInfoHTSSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/TokenInfoHTSSuite.java
@@ -100,7 +100,6 @@
import java.util.OptionalLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
-import org.apache.tuweni.bytes.Bytes;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.Tag;
@@ -180,8 +179,11 @@ final Stream<DynamicTest> happyPathGetTokenInfo() {
newKeyNamed(WIPE_KEY),
newKeyNamed(FEE_SCHEDULE_KEY),
newKeyNamed(PAUSE_KEY),
+ newKeyNamed(TokenKeyType.METADATA_KEY.name()),
uploadInitCode(TOKEN_INFO_CONTRACT),
contractCreate(TOKEN_INFO_CONTRACT).gas(1_000_000L),
+ uploadInitCode("TokenInfo"),
+ contractCreate("TokenInfo").gas(1_000_000L),
tokenCreate(PRIMARY_TOKEN_NAME)
.supplyType(TokenSupplyType.FINITE)
.entityMemo(MEMO)
@@ -199,6 +201,8 @@ final Stream<DynamicTest> happyPathGetTokenInfo() {
.wipeKey(WIPE_KEY)
.feeScheduleKey(FEE_SCHEDULE_KEY)
.pauseKey(PAUSE_KEY)
+ .metadataKey(TokenKeyType.METADATA_KEY.name())
+ .metaData("metadata")
.withCustom(fixedHbarFee(500L, HTS_COLLECTOR))
// Include a fractional fee with no minimum to collect
.withCustom(fractionalFee(
@@ -209,8 +213,7 @@ final Stream happyPathGetTokenInfo() {
MINIMUM_TO_COLLECT,
OptionalLong.of(MAXIMUM_TO_COLLECT),
TOKEN_TREASURY))
- .via(CREATE_TXN),
- getTokenInfo(PRIMARY_TOKEN_NAME).via(GET_TOKEN_INFO_TXN))
+ .via(CREATE_TXN))
.when(withOpContext((spec, opLog) -> allRunFor(
spec,
contractCall(
@@ -224,7 +227,14 @@ final Stream happyPathGetTokenInfo() {
TOKEN_INFO_CONTRACT,
GET_INFORMATION_FOR_TOKEN,
HapiParserUtil.asHeadlongAddress(
- asAddress(spec.registry().getTokenID(PRIMARY_TOKEN_NAME)))))))
+ asAddress(spec.registry().getTokenID(PRIMARY_TOKEN_NAME)))),
+ contractCall(
+ "TokenInfo",
+ "getInformationForTokenV2",
+ HapiParserUtil.asHeadlongAddress(
+ asAddress(spec.registry().getTokenID(PRIMARY_TOKEN_NAME))))
+ .via("TOKEN_INFO_TXN_V2")
+ .gas(1_000_000L))))
.then(exposeTargetLedgerIdTo(targetLedgerId::set), withOpContext((spec, opLog) -> {
final var getTokenInfoQuery = getTokenInfo(PRIMARY_TOKEN_NAME);
allRunFor(spec, getTokenInfoQuery);
@@ -248,6 +258,25 @@ final Stream happyPathGetTokenInfo() {
.forFunction(FunctionType.HAPI_GET_TOKEN_INFO)
.withStatus(SUCCESS)
.withTokenInfo(getTokenInfoStructForFungibleToken(
+ spec,
+ PRIMARY_TOKEN_NAME,
+ SYMBOL,
+ MEMO,
+ spec.registry()
+ .getAccountID(TOKEN_TREASURY),
+ getTokenKeyFromSpec(spec, TokenKeyType.ADMIN_KEY),
+ expirySecond,
+ targetLedgerId.get()))))),
+ childRecordsCheck(
+ "TOKEN_INFO_TXN_V2",
+ SUCCESS,
+ recordWith()
+ .status(SUCCESS)
+ .contractCallResult(resultWith()
+ .contractCallResult(htsPrecompileResult()
+ .forFunction(FunctionType.HAPI_GET_TOKEN_INFO_V2)
+ .withStatus(SUCCESS)
+ .withTokenInfo(getTokenInfoStructForFungibleTokenV2(
spec,
PRIMARY_TOKEN_NAME,
SYMBOL,
@@ -280,8 +309,11 @@ final Stream<DynamicTest> happyPathGetFungibleTokenInfo() {
newKeyNamed(WIPE_KEY),
newKeyNamed(FEE_SCHEDULE_KEY),
newKeyNamed(PAUSE_KEY),
+ newKeyNamed(TokenKeyType.METADATA_KEY.name()),
uploadInitCode(TOKEN_INFO_CONTRACT),
contractCreate(TOKEN_INFO_CONTRACT).gas(1_000_000L),
+ uploadInitCode("TokenInfo"),
+ contractCreate("TokenInfo").gas(1_000_000L),
tokenCreate(FUNGIBLE_TOKEN_NAME)
.supplyType(TokenSupplyType.FINITE)
.entityMemo(MEMO)
@@ -300,6 +332,8 @@ final Stream<DynamicTest> happyPathGetFungibleTokenInfo() {
.wipeKey(WIPE_KEY)
.feeScheduleKey(FEE_SCHEDULE_KEY)
.pauseKey(PAUSE_KEY)
+ .metadataKey(TokenKeyType.METADATA_KEY.name())
+ .metaData("metadata")
.withCustom(fixedHbarFee(500L, HTS_COLLECTOR))
// Also include a fractional fee with no minimum to collect
.withCustom(fractionalFee(
@@ -324,7 +358,14 @@ final Stream<DynamicTest> happyPathGetFungibleTokenInfo() {
TOKEN_INFO_CONTRACT,
GET_INFORMATION_FOR_FUNGIBLE_TOKEN,
HapiParserUtil.asHeadlongAddress(
- asAddress(spec.registry().getTokenID(FUNGIBLE_TOKEN_NAME)))))))
+ asAddress(spec.registry().getTokenID(FUNGIBLE_TOKEN_NAME)))),
+ contractCall(
+ "TokenInfo",
+ "getInformationForFungibleTokenV2",
+ HapiParserUtil.asHeadlongAddress(
+ asAddress(spec.registry().getTokenID(FUNGIBLE_TOKEN_NAME))))
+ .via("FUNGIBLE_TOKEN_INFO_TXN_V2")
+ .gas(1_000_000L))))
.then(exposeTargetLedgerIdTo(targetLedgerId::set), withOpContext((spec, opLog) -> {
final var getTokenInfoQuery = getTokenInfo(FUNGIBLE_TOKEN_NAME);
allRunFor(spec, getTokenInfoQuery);
@@ -348,6 +389,26 @@ final Stream<DynamicTest> happyPathGetFungibleTokenInfo() {
.withStatus(SUCCESS)
.withDecimals(decimals)
.withTokenInfo(getTokenInfoStructForFungibleToken(
+ spec,
+ FUNGIBLE_TOKEN_NAME,
+ FUNGIBLE_SYMBOL,
+ MEMO,
+ spec.registry()
+ .getAccountID(TOKEN_TREASURY),
+ getTokenKeyFromSpec(spec, TokenKeyType.ADMIN_KEY),
+ expirySecond,
+ targetLedgerId.get()))))),
+ childRecordsCheck(
+ "FUNGIBLE_TOKEN_INFO_TXN_V2",
+ SUCCESS,
+ recordWith()
+ .status(SUCCESS)
+ .contractCallResult(resultWith()
+ .contractCallResult(htsPrecompileResult()
+ .forFunction(FunctionType.HAPI_GET_FUNGIBLE_TOKEN_INFO_V2)
+ .withStatus(SUCCESS)
+ .withDecimals(decimals)
+ .withTokenInfo(getTokenInfoStructForFungibleTokenV2(
spec,
FUNGIBLE_TOKEN_NAME,
FUNGIBLE_SYMBOL,
@@ -383,8 +444,11 @@ final Stream<DynamicTest> happyPathGetNonFungibleTokenInfo() {
newKeyNamed(WIPE_KEY),
newKeyNamed(FEE_SCHEDULE_KEY),
newKeyNamed(PAUSE_KEY),
+ newKeyNamed(TokenKeyType.METADATA_KEY.name()),
uploadInitCode(TOKEN_INFO_CONTRACT),
contractCreate(TOKEN_INFO_CONTRACT).gas(1_000_000L),
+ uploadInitCode("TokenInfo"),
+ contractCreate("TokenInfo").gas(1_000_000L),
tokenCreate(FEE_DENOM).treasury(HTS_COLLECTOR),
tokenCreate(NON_FUNGIBLE_TOKEN_NAME)
.tokenType(TokenType.NON_FUNGIBLE_UNIQUE)
@@ -404,6 +468,8 @@ final Stream<DynamicTest> happyPathGetNonFungibleTokenInfo() {
.wipeKey(WIPE_KEY)
.feeScheduleKey(FEE_SCHEDULE_KEY)
.pauseKey(PAUSE_KEY)
+ .metadataKey(TokenKeyType.METADATA_KEY.name())
+ .metaData("metadata")
.withCustom(royaltyFeeWithFallback(
1, 2, fixedHtsFeeInheritingRoyaltyCollector(100, FEE_DENOM), HTS_COLLECTOR))
.via(CREATE_TXN),
@@ -435,7 +501,15 @@ final Stream<DynamicTest> happyPathGetNonFungibleTokenInfo() {
GET_INFORMATION_FOR_NON_FUNGIBLE_TOKEN,
HapiParserUtil.asHeadlongAddress(
asAddress(spec.registry().getTokenID(NON_FUNGIBLE_TOKEN_NAME))),
- 1L))))
+ 1L),
+ contractCall(
+ "TokenInfo",
+ "getInformationForNonFungibleTokenV2",
+ HapiParserUtil.asHeadlongAddress(
+ asAddress(spec.registry().getTokenID(NON_FUNGIBLE_TOKEN_NAME))),
+ 1L)
+ .via("NON_FUNGIBLE_TOKEN_INFO_TXN_V2")
+ .gas(1_000_000L))))
.then(exposeTargetLedgerIdTo(targetLedgerId::set), withOpContext((spec, opLog) -> {
final var getTokenInfoQuery = getTokenInfo(NON_FUNGIBLE_TOKEN_NAME);
allRunFor(spec, getTokenInfoQuery);
@@ -473,6 +547,24 @@ final Stream<DynamicTest> happyPathGetNonFungibleTokenInfo() {
getTokenKeyFromSpec(spec, TokenKeyType.ADMIN_KEY),
expirySecond,
targetLedgerId.get()))
+ .withNftTokenInfo(nftTokenInfo)))),
+ childRecordsCheck(
+ "NON_FUNGIBLE_TOKEN_INFO_TXN_V2",
+ SUCCESS,
+ recordWith()
+ .status(SUCCESS)
+ .contractCallResult(resultWith()
+ .contractCallResult(htsPrecompileResult()
+ .forFunction(
+ FunctionType.HAPI_GET_NON_FUNGIBLE_TOKEN_INFO_V2)
+ .withStatus(SUCCESS)
+ .withTokenInfo(getTokenInfoStructForNonFungibleTokenV2(
+ spec,
+ spec.registry()
+ .getAccountID(TOKEN_TREASURY),
+ getTokenKeyFromSpec(spec, TokenKeyType.ADMIN_KEY),
+ expirySecond,
+ targetLedgerId.get()))
.withNftTokenInfo(nftTokenInfo)))));
}));
}
@@ -1487,6 +1579,7 @@ final Stream<DynamicTest> happyPathUpdateTokenKeysAndReadLatestInformation() {
HapiParserUtil.asHeadlongAddress(
asAddress(spec.registry().getContractId(TOKEN_INFO_CONTRACT))))
.via(UPDATE_AND_GET_TOKEN_KEYS_INFO_TXN)
+ .gas(117000L)
.alsoSigningWithFullPrefix(MULTI_KEY))))
.then(withOpContext((spec, opLog) -> allRunFor(
spec,
@@ -1590,18 +1683,21 @@ private TokenNftInfo getTokenNftInfoForCheck(
.build();
}
- private TokenNftInfo getEmptyNft() {
- return TokenNftInfo.newBuilder()
- .setLedgerId(ByteString.empty())
- .setNftID(NftID.getDefaultInstance())
- .setAccountID(AccountID.getDefaultInstance())
- .setCreationTime(Timestamp.newBuilder().build())
- .setMetadata(ByteString.empty())
- .setSpenderId(AccountID.getDefaultInstance())
+ private TokenInfo getTokenInfoStructForFungibleToken(
+ final HapiSpec spec,
+ final String tokenName,
+ final String symbol,
+ final String memo,
+ final AccountID treasury,
+ final Key adminKey,
+ final long expirySecond,
+ ByteString ledgerId) {
+
+ return buildBaseTokenInfo(spec, tokenName, symbol, memo, treasury, adminKey, expirySecond, ledgerId)
.build();
}
- private TokenInfo getTokenInfoStructForFungibleToken(
+ private TokenInfo getTokenInfoStructForFungibleTokenV2(
final HapiSpec spec,
final String tokenName,
final String symbol,
@@ -1610,9 +1706,27 @@ private TokenInfo getTokenInfoStructForFungibleToken(
final Key adminKey,
final long expirySecond,
ByteString ledgerId) {
- final var autoRenewAccount = spec.registry().getAccountID(AUTO_RENEW_ACCOUNT);
- final ArrayList<CustomFee> customFees = getExpectedCustomFees(spec);
+ final ByteString meta = ByteString.copyFrom("metadata".getBytes(StandardCharsets.UTF_8));
+
+ return buildBaseTokenInfo(spec, tokenName, symbol, memo, treasury, adminKey, expirySecond, ledgerId)
+ .setMetadata(meta)
+ .setMetadataKey(getTokenKeyFromSpec(spec, TokenKeyType.METADATA_KEY))
+ .build();
+ }
+
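+ // Builder for the TokenInfo fields shared by the V1 and V2 fungible-token assertions;
+ // the V2 variant layers the metadata bytes and metadata key on top of this base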
+ private TokenInfo.Builder buildBaseTokenInfo(
+ final HapiSpec spec,
+ final String tokenName,
+ final String symbol,
+ final String memo,
+ final AccountID treasury,
+ final Key adminKey,
+ final long expirySecond,
+ ByteString ledgerId) {
+
+ final var autoRenewAccount = spec.registry().getAccountID(AUTO_RENEW_ACCOUNT);
+ final var customFees = getExpectedCustomFees(spec);
return TokenInfo.newBuilder()
.setLedgerId(ledgerId)
@@ -1635,41 +1749,7 @@ private TokenInfo getTokenInfoStructForFungibleToken(
.setWipeKey(getTokenKeyFromSpec(spec, TokenKeyType.WIPE_KEY))
.setSupplyKey(getTokenKeyFromSpec(spec, TokenKeyType.SUPPLY_KEY))
.setFeeScheduleKey(getTokenKeyFromSpec(spec, TokenKeyType.FEE_SCHEDULE_KEY))
- .setPauseKey(getTokenKeyFromSpec(spec, TokenKeyType.PAUSE_KEY))
- .build();
- }
-
- private TokenInfo getTokenInfoStructForEmptyFungibleToken(
- final String tokenName,
- final String symbol,
- final String memo,
- final AccountID treasury,
- final long expirySecond,
- ByteString ledgerId) {
-
- final ArrayList<CustomFee> customFees = new ArrayList<>();
-
- return TokenInfo.newBuilder()
- .setLedgerId(ledgerId)
- .setSupplyTypeValue(0)
- .setExpiry(Timestamp.newBuilder().setSeconds(expirySecond))
- .setAutoRenewAccount(AccountID.getDefaultInstance())
- .setAutoRenewPeriod(Duration.newBuilder().setSeconds(0).build())
- .setSymbol(symbol)
- .setName(tokenName)
- .setMemo(memo)
- .setTreasury(treasury)
- .setTotalSupply(0)
- .setMaxSupply(0)
- .addAllCustomFees(customFees)
- .setAdminKey(Key.newBuilder().build())
- .setKycKey(Key.newBuilder().build())
- .setFreezeKey(Key.newBuilder().build())
- .setWipeKey(Key.newBuilder().build())
- .setSupplyKey(Key.newBuilder().build())
- .setFeeScheduleKey(Key.newBuilder().build())
- .setPauseKey(Key.newBuilder().build())
- .build();
+ .setPauseKey(getTokenKeyFromSpec(spec, TokenKeyType.PAUSE_KEY));
}
@NonNull
@@ -1721,9 +1801,43 @@ private TokenInfo getTokenInfoStructForNonFungibleToken(
final Key adminKey,
final long expirySecond,
final ByteString ledgerId) {
+ return buildTokenInfo(spec, tokenName, symbol, memo, treasury, adminKey, expirySecond, ledgerId, null, false);
+ }
+
+ private TokenInfo getTokenInfoStructForNonFungibleTokenV2(
+ final HapiSpec spec,
+ final AccountID treasury,
+ final Key adminKey,
+ final long expirySecond,
+ final ByteString ledgerId) {
+ final ByteString meta = ByteString.copyFrom("metadata".getBytes(StandardCharsets.UTF_8));
+ return buildTokenInfo(
+ spec,
+ TokenInfoHTSSuite.NON_FUNGIBLE_TOKEN_NAME,
+ TokenInfoHTSSuite.NON_FUNGIBLE_SYMBOL,
+ TokenInfoHTSSuite.MEMO,
+ treasury,
+ adminKey,
+ expirySecond,
+ ledgerId,
+ meta,
+ true);
+ }
+
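+ // Expected TokenInfo for the non-fungible assertions; metadata and the metadata key
+ // are only populated for the V2 variant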
+ private TokenInfo buildTokenInfo(
+ final HapiSpec spec,
+ final String tokenName,
+ final String symbol,
+ final String memo,
+ final AccountID treasury,
+ final Key adminKey,
+ final long expirySecond,
+ final ByteString ledgerId,
+ final ByteString metadata,
+ final boolean includeMetadataKey) {
final var autoRenewAccount = spec.registry().getAccountID(AUTO_RENEW_ACCOUNT);
- return TokenInfo.newBuilder()
+ TokenInfo.Builder builder = TokenInfo.newBuilder()
.setLedgerId(ledgerId)
.setSupplyTypeValue(TokenSupplyType.FINITE_VALUE)
.setExpiry(Timestamp.newBuilder().setSeconds(expirySecond))
@@ -1744,8 +1858,17 @@ private TokenInfo getTokenInfoStructForNonFungibleToken(
.setWipeKey(getTokenKeyFromSpec(spec, TokenKeyType.WIPE_KEY))
.setSupplyKey(getTokenKeyFromSpec(spec, TokenKeyType.SUPPLY_KEY))
.setFeeScheduleKey(getTokenKeyFromSpec(spec, TokenKeyType.FEE_SCHEDULE_KEY))
- .setPauseKey(getTokenKeyFromSpec(spec, TokenKeyType.PAUSE_KEY))
- .build();
+ .setPauseKey(getTokenKeyFromSpec(spec, TokenKeyType.PAUSE_KEY));
+
+ if (metadata != null) {
+ builder.setMetadata(metadata);
+ }
+
+ if (includeMetadataKey) {
+ builder.setMetadataKey(getTokenKeyFromSpec(spec, TokenKeyType.METADATA_KEY));
+ }
+
+ return builder.build();
}
@NonNull
@@ -1794,8 +1917,4 @@ private Key getTokenKeyFromSpec(final HapiSpec spec, final TokenKeyType type) {
return keyBuilder.build();
}
-
- private ByteString fromString(final String value) {
- return ByteString.copyFrom(Bytes.fromHexString(value).toArray());
- }
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/WipeTokenAccountPrecompileSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/WipeTokenAccountPrecompileSuite.java
index 46e760a654dc..36fb00907789 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/WipeTokenAccountPrecompileSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/WipeTokenAccountPrecompileSuite.java
@@ -200,7 +200,7 @@ final Stream<DynamicTest> wipeFungibleTokenScenarios() {
.contractCallResult(resultWith()
.contractCallResult(
htsPrecompileResult().withStatus(SUCCESS))
- .gasUsed(14085L))),
+ .gasUsed(15284L))),
getTokenInfo(VANILLA_TOKEN).hasTotalSupply(990),
getAccountBalance(ACCOUNT).hasTokenBalance(VANILLA_TOKEN, 490));
}
@@ -335,7 +335,7 @@ final Stream<DynamicTest> wipeNonFungibleTokenScenarios() {
.contractCallResult(resultWith()
.contractCallResult(
htsPrecompileResult().withStatus(SUCCESS))
- .gasUsed(14085L))),
+ .gasUsed(15284L))),
getTokenInfo(VANILLA_TOKEN).hasTotalSupply(1),
getAccountBalance(ACCOUNT).hasTokenBalance(VANILLA_TOKEN, 0));
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/token/GasCalculationIntegrityTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/token/GasCalculationIntegrityTest.java
new file mode 100644
index 000000000000..c8dac59cd2dd
--- /dev/null
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/contract/precompile/token/GasCalculationIntegrityTest.java
@@ -0,0 +1,450 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.services.bdd.suites.contract.precompile.token;
+
+import static com.hedera.services.bdd.junit.ContextRequirement.UPGRADE_FILE_CONTENT;
+import static com.hedera.services.bdd.junit.TestTags.SMART_CONTRACT;
+import static com.hedera.services.bdd.spec.HapiSpec.hapiTest;
+import static com.hedera.services.bdd.spec.dsl.entities.SpecTokenKey.ADMIN_KEY;
+import static com.hedera.services.bdd.spec.dsl.entities.SpecTokenKey.PAUSE_KEY;
+import static com.hedera.services.bdd.spec.dsl.entities.SpecTokenKey.SUPPLY_KEY;
+import static com.hedera.services.bdd.spec.dsl.entities.SpecTokenKey.WIPE_KEY;
+import static com.hedera.services.bdd.spec.queries.QueryVerbs.getFileContents;
+import static com.hedera.services.bdd.spec.queries.QueryVerbs.getTxnRecord;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.fileUpdate;
+import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
+import static com.hedera.services.bdd.suites.HapiSuite.EXCHANGE_RATES;
+import static com.hedera.services.bdd.suites.HapiSuite.ONE_HBAR;
+import static com.hedera.services.bdd.suites.HapiSuite.ONE_HUNDRED_HBARS;
+import static com.hedera.services.bdd.suites.HapiSuite.ONE_MILLION_HBARS;
+import static com.hedera.services.bdd.suites.HapiSuite.THOUSAND_HBAR;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS;
+
+import com.google.protobuf.ByteString;
+import com.hedera.services.bdd.junit.HapiTestLifecycle;
+import com.hedera.services.bdd.junit.LeakyHapiTest;
+import com.hedera.services.bdd.junit.OrderedInIsolation;
+import com.hedera.services.bdd.junit.support.TestLifecycle;
+import com.hedera.services.bdd.spec.dsl.annotations.Account;
+import com.hedera.services.bdd.spec.dsl.annotations.Contract;
+import com.hedera.services.bdd.spec.dsl.annotations.FungibleToken;
+import com.hedera.services.bdd.spec.dsl.annotations.NonFungibleToken;
+import com.hedera.services.bdd.spec.dsl.entities.SpecAccount;
+import com.hedera.services.bdd.spec.dsl.entities.SpecContract;
+import com.hedera.services.bdd.spec.dsl.entities.SpecFungibleToken;
+import com.hedera.services.bdd.spec.dsl.entities.SpecNonFungibleToken;
+import com.hedera.services.bdd.spec.transactions.file.HapiFileUpdate;
+import com.hedera.services.bdd.spec.utilops.CustomSpecAssert;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.math.BigInteger;
+import java.util.List;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Tag;
+
+@Tag(SMART_CONTRACT)
+@DisplayName("Gas Integrity Tests for Token Contracts")
+@HapiTestLifecycle
+@OrderedInIsolation
+@SuppressWarnings("java:S1192")
+public class GasCalculationIntegrityTest {
+
+ @Contract(contract = "NumericContract", creationGas = 1_000_000L)
+ static SpecContract numericContract;
+
+ @Contract(contract = "NumericContractComplex", creationGas = 1_000_000L)
+ static SpecContract numericContractComplex;
+
+ @Contract(contract = "TokenInfoContract", creationGas = 1_000_000L)
+ static SpecContract tokenInfoContract;
+
+ @Contract(contract = "ERC20Contract", creationGas = 1_000_000L)
+ static SpecContract erc20Contract;
+
+ @Account(maxAutoAssociations = 10, tinybarBalance = ONE_MILLION_HBARS)
+ static SpecAccount alice;
+
+ @Account(maxAutoAssociations = 10, tinybarBalance = ONE_MILLION_HBARS)
+ static SpecAccount bob;
+
+ @FungibleToken
+ static SpecFungibleToken token;
+
+ @FungibleToken(
+ initialSupply = 1_000L,
+ maxSupply = 1_200L,
+ keys = {SUPPLY_KEY, PAUSE_KEY, ADMIN_KEY, WIPE_KEY})
+ static SpecFungibleToken fungibleToken;
+
+ @NonFungibleToken(
+ numPreMints = 7,
+ keys = {SUPPLY_KEY, PAUSE_KEY, ADMIN_KEY, WIPE_KEY})
+ static SpecNonFungibleToken nft;
+
+ public static final long EXPIRY_RENEW = 3_000_000L;
+
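+ // Each entry is an (hBarEquiv, centEquiv) exchange rate; every test below re-runs its
+ // call under each rate to verify the gas requirement does not drift with the rate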
+ private final Stream<RatesProvider> testCases = Stream.of(
+ new RatesProvider(30000, 16197),
+ new RatesProvider(30000, 359789),
+ new RatesProvider(30000, 2888899),
+ new RatesProvider(30000, 269100));
+
+ private record RatesProvider(int hBarEquiv, int centEquiv) {}
+
+ @BeforeAll
+ public static void beforeAll(final @NonNull TestLifecycle lifecycle) {
+ // Snapshot the current exchange-rate file so each test can restore it after mutating rates
+ lifecycle.doAdhoc(
+ withOpContext((spec, opLog) -> {
+ var fetch = getFileContents(EXCHANGE_RATES).logged();
+ allRunFor(spec, fetch);
+ final var validRates = fetch.getResponse()
+ .getFileGetContents()
+ .getFileContents()
+ .getContents();
+ spec.registry().saveBytes("originalRates", validRates);
+ }),
+
+ // Authorizations
+ fungibleToken.authorizeContracts(numericContractComplex),
+ nft.authorizeContracts(numericContractComplex),
+ numericContract.associateTokens(fungibleToken, nft),
+
+ // Approvals
+ fungibleToken.treasury().approveTokenAllowance(fungibleToken, numericContractComplex, 1000L),
+ nft.treasury().approveNFTAllowance(nft, numericContractComplex, true, List.of(1L, 2L, 3L, 4L, 5L)),
+ alice.approveCryptoAllowance(numericContractComplex, ONE_HBAR),
+ // Transfers
+ fungibleToken.treasury().transferUnitsTo(numericContract, 100L, fungibleToken),
+ nft.treasury().transferNFTsTo(numericContract, nft, 7L),
+ alice.transferHBarsTo(numericContractComplex, ONE_HUNDRED_HBARS));
+ }
+
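+ // Shared shape of the tests below: update the exchange rate, run the contract call with
+ // a fixed gas limit, restore the original rates, then log the record for inspection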
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(1)
+ @DisplayName("when using nft via redirect proxy contract")
+ public Stream<DynamicTest> approveViaProxyNft() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContract
+ .call("approveRedirect", nft, bob, BigInteger.valueOf(7))
+ .gas(756_729L)
+ .via("approveRedirectTxn")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("approveRedirectTxn").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(2)
+ @DisplayName("when using fungible token hts system contract")
+ public Stream<DynamicTest> approveFungibleToken() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContract
+ .call("approve", fungibleToken, alice, BigInteger.TWO)
+ .gas(742_877L)
+ .via("approveTxn")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("approveTxn").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(3)
+ @DisplayName("when using createFungibleTokenWithCustomFeesV3 with fractionalFee")
+ public Stream<DynamicTest> createFungibleTokenWithCustomFeesV3FractionalFee() {
+ final long numerator = 1;
+ final long denominator = 1;
+ final long maxAmount = 500;
+ final long minAmount = 100;
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call(
+ "createFungibleTokenWithCustomFeesV3FractionalFee",
+ numerator,
+ denominator,
+ minAmount,
+ maxAmount)
+ .gas(165_038L)
+ .sending(THOUSAND_HBAR)
+ .via("createWithCustomFeeFractional")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("createWithCustomFeeFractional").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(4)
+ @DisplayName("when using createNonFungibleTokenWithCustomFeesV3 with fractionalFee")
+ public Stream createNonFungibleTokenWithCustomRoyaltyFeesV3() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("createNonFungibleTokenWithCustomRoyaltyFeesV3", alice.getED25519KeyBytes(), 1L, 2L, 10L)
+ .gas(169_584L)
+ .sending(THOUSAND_HBAR)
+ .payingWith(alice)
+ .via("createWithCustomFeeRoyalty")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("createWithCustomFeeRoyalty").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @DisplayName("when using createFungibleToken")
+ public Stream<DynamicTest> createFungible() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("createFungibleToken", EXPIRY_RENEW, EXPIRY_RENEW, 10000L, BigInteger.TEN, BigInteger.TWO)
+ .gas(165_800L)
+ .sending(THOUSAND_HBAR)
+ .via("createFungibleToken")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("createFungibleToken").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(5)
+ @DisplayName("when using createNonFungibleTokenV3")
+ public Stream<DynamicTest> createNonFungibleTokenV3() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("createNonFungibleTokenV3", alice.getED25519KeyBytes(), EXPIRY_RENEW, EXPIRY_RENEW, 10L)
+ .gas(166_944L)
+ .sending(THOUSAND_HBAR)
+ .payingWith(alice)
+ .via("createNonFungibleTokenV3")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("createNonFungibleTokenV3").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(6)
+ @DisplayName("when using cryptoTransferV2 for hBar transfer")
+ public Stream<DynamicTest> useCryptoTransferV2() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("cryptoTransferV2", new long[] {-5, 5}, alice, bob)
+ .gas(33_304L)
+ .via("cryptoTransferV2")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("cryptoTransferV2").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(7)
+ @DisplayName("when using cryptoTransferFungibleV1 with internal auto associate")
+ public Stream<DynamicTest> useCryptoTransferFungibleV1() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call(
+ "cryptoTransferFungibleV1",
+ fungibleToken,
+ new long[] {-5, 5},
+ fungibleToken.treasury(),
+ bob)
+ .via("cryptoTransferFungibleV1")
+ .gas(763_480L),
+ restoreOriginalRates(),
+ getTxnRecord("cryptoTransferFungibleV1").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(8)
+ @DisplayName("when using cryptoTransferNonFungible with internal auto associate")
+ public Stream<DynamicTest> useCryptoTransferNonFungible() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("cryptoTransferNonFungible", nft, nft.treasury(), bob, 1L)
+ .gas(761_070L)
+ .via("cryptoTransferNonFungible")
+ .andAssert(txn -> txn.hasKnownStatus(SUCCESS)),
+ restoreOriginalRates(),
+ getTxnRecord("cryptoTransferNonFungible").logged(),
+ bob.transferNFTsTo(nft.treasury(), nft, 1L)));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(9)
+ @DisplayName("when using transferNFTs with internal auto associate")
+ public Stream<DynamicTest> useTransferNFTs() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("transferNFTs", nft, nft.treasury(), alice, new long[] {4L})
+ .via("transferNFTs")
+ .gas(761_519L),
+ restoreOriginalRates(),
+ getTxnRecord("transferNFTs").logged(),
+ alice.transferNFTsTo(nft.treasury(), nft, 4L)));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(10)
+ @DisplayName("when using transferToken with internal auto associate")
+ public Stream<DynamicTest> useTransferToken() {
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("transferTokenTest", fungibleToken, fungibleToken.treasury(), alice, 1L)
+ .via("transferTokenTest")
+ .gas(758_568L),
+ restoreOriginalRates(),
+ getTxnRecord("transferTokenTest").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(11)
+ @DisplayName("when using transferNFT")
+ public Stream<DynamicTest> useTransferNFT() {
+ // Relies on the token association created by an earlier test, so it cannot run in isolation
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("transferNFTTest", nft, nft.treasury(), alice, 3L)
+ .via("transferNFTTest")
+ .gas(42_235L),
+ restoreOriginalRates(),
+ getTxnRecord("transferNFTTest").logged(),
+ alice.transferNFTsTo(nft.treasury(), nft, 3L)));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(12)
+ @DisplayName("when using transferFrom")
+ public Stream<DynamicTest> useTransferFrom() {
+ // Relies on the token association created by an earlier test, so it cannot run in isolation
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("transferFrom", fungibleToken, fungibleToken.treasury(), alice, BigInteger.ONE)
+ .via("transferFrom")
+ .gas(42_264L),
+ restoreOriginalRates(),
+ getTxnRecord("transferFrom").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(13)
+ @DisplayName("when using transferFromERC")
+ public Stream<DynamicTest> useTransferFromERC() {
+ // Relies on the token association created by an earlier test, so it cannot run in isolation
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("transferFromERC", fungibleToken, fungibleToken.treasury(), alice, BigInteger.ONE)
+ .via("transferFromERC")
+ .gas(44_900L),
+ restoreOriginalRates(),
+ getTxnRecord("transferFromERC").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(14)
+ @DisplayName("when using transferFromNFT")
+ public Stream<DynamicTest> useTransferNFTFrom() {
+ // Relies on the token association created by an earlier test, so it cannot run in isolation
+ return testCases.flatMap(rates -> hapiTest(
+ updateRates(rates.hBarEquiv, rates.centEquiv),
+ numericContractComplex
+ .call("transferFromNFT", nft, nft.treasury(), alice, BigInteger.TWO)
+ .via("transferFromNFT")
+ .gas(42_263L),
+ getTxnRecord("transferFromNFT").logged(),
+ restoreOriginalRates(),
+ alice.transferNFTsTo(nft.treasury(), nft, 2L)));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(15)
+ @DisplayName("for token info call")
+ public Stream<DynamicTest> checkTokenGetInfoGas() {
+ return testCases.flatMap(ratesProvider -> hapiTest(
+ updateRates(ratesProvider.hBarEquiv, ratesProvider.centEquiv),
+ tokenInfoContract
+ .call("getInformationForToken", token)
+ .gas(78_805L)
+ .via("tokenInfo"),
+ restoreOriginalRates(),
+ getTxnRecord("tokenInfo").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(16)
+ @DisplayName("for token custom fees call")
+ public Stream<DynamicTest> checkTokenGetCustomFeesGas() {
+ return testCases.flatMap(ratesProvider -> hapiTest(
+ updateRates(ratesProvider.hBarEquiv, ratesProvider.centEquiv),
+ tokenInfoContract
+ .call("getCustomFeesForToken", token)
+ .gas(31_421L)
+ .via("customFees"),
+ restoreOriginalRates(),
+ getTxnRecord("customFees").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(17)
+ @DisplayName("for token name call")
+ public Stream<DynamicTest> checkErc20Name() {
+ return testCases.flatMap(ratesProvider -> hapiTest(
+ updateRates(ratesProvider.hBarEquiv, ratesProvider.centEquiv),
+ erc20Contract.call("name", token).gas(30_207L).via("name"),
+ restoreOriginalRates(),
+ getTxnRecord("name").logged()));
+ }
+
+ @LeakyHapiTest(requirement = UPGRADE_FILE_CONTENT)
+ @Order(18)
+ @DisplayName("for token balance of call")
+ public Stream<DynamicTest> checkErc20BalanceOf() {
+ return testCases.flatMap(ratesProvider -> hapiTest(
+ updateRates(ratesProvider.hBarEquiv, ratesProvider.centEquiv),
+ erc20Contract.call("balanceOf", token, alice).gas(30_074L).via("balance"),
+ restoreOriginalRates(),
+ getTxnRecord("balance").logged()));
+ }
+
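+ // Overwrites the exchange-rates system file with the given rate pair; callers restore
+ // the original contents afterwards via restoreOriginalRates()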
+ private static HapiFileUpdate updateRates(final int hBarEquiv, final int centEquiv) {
+ return fileUpdate(EXCHANGE_RATES)
+ .contents(spec ->
+ spec.ratesProvider().rateSetWith(hBarEquiv, centEquiv).toByteString());
+ }
+
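+ // Restores the exchange-rates file from the bytes saved in beforeAll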
+ private static CustomSpecAssert restoreOriginalRates() {
+ return withOpContext((spec, opLog) -> {
+ var resetRatesOp = fileUpdate(EXCHANGE_RATES)
+ .contents(contents -> ByteString.copyFrom(spec.registry().getBytes("originalRates")));
+ allRunFor(spec, resetRatesOp);
+ });
+ }
+}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/CryptoUpdateSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/CryptoUpdateSuite.java
index 04c5f8d0271e..4b038e755376 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/CryptoUpdateSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/crypto/CryptoUpdateSuite.java
@@ -35,6 +35,7 @@
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.contractCreate;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.contractUpdate;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoCreate;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoDelete;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoTransfer;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoUpdate;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.tokenAssociate;
@@ -50,6 +51,7 @@
import static com.hedera.services.bdd.spec.utilops.mod.ModificationUtils.withSuccessivelyVariedBodyIds;
import static com.hedera.services.bdd.spec.utilops.records.SnapshotMatchMode.EXPECT_STREAMLINED_INGEST_RECORDS;
import static com.hedera.services.bdd.spec.utilops.records.SnapshotMatchMode.NONDETERMINISTIC_TRANSACTION_FEES;
+import static com.hedera.services.bdd.suites.HapiSuite.DEFAULT_PAYER;
import static com.hedera.services.bdd.suites.HapiSuite.GENESIS;
import static com.hedera.services.bdd.suites.HapiSuite.ONE_HBAR;
import static com.hedera.services.bdd.suites.HapiSuite.ONE_HUNDRED_HBARS;
@@ -57,6 +59,7 @@
import static com.hedera.services.bdd.suites.HapiSuite.ZERO_BYTE_MEMO;
import static com.hedera.services.bdd.suites.contract.hapi.ContractUpdateSuite.ADMIN_KEY;
import static com.hederahashgraph.api.proto.java.HederaFunctionality.CryptoUpdate;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.ACCOUNT_DELETED;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.EXISTING_AUTOMATIC_ASSOCIATIONS_EXCEED_GIVEN_LIMIT;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_ADMIN_KEY;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_EXPIRATION_TIME;
@@ -549,4 +552,16 @@ final Stream<DynamicTest> updateMaxAutoAssociationsWorks() {
contractUpdate(CONTRACT).newMaxAutomaticAssociations(-1).hasKnownStatus(SUCCESS),
getContractInfo(CONTRACT).has(contractWith().maxAutoAssociations(-1)));
}
+
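+ // Updates to a deleted account should be rejected with ACCOUNT_DELETED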
+ @HapiTest
+ final Stream<DynamicTest> deletedAccountCannotBeUpdated() {
+ final var accountToDelete = "accountToDelete";
+ return hapiTest(
+ cryptoCreate(accountToDelete).declinedReward(false),
+ cryptoDelete(accountToDelete),
+ cryptoUpdate(accountToDelete)
+ .payingWith(DEFAULT_PAYER)
+ .newDeclinedReward(true)
+ .hasKnownStatus(ACCOUNT_DELETED));
+ }
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/ethereum/NonceSuite.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/ethereum/NonceSuite.java
index e65316c1f733..8883edbd1b25 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/ethereum/NonceSuite.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/ethereum/NonceSuite.java
@@ -80,7 +80,7 @@
public class NonceSuite {
private static final long LOW_GAS_PRICE = 1L;
private static final long ENOUGH_GAS_PRICE = 75L;
- private static final long ENOUGH_GAS_LIMIT = 150_000L;
+ private static final long ENOUGH_GAS_LIMIT = 215_000L;
private static final String RECEIVER = "receiver";
private static final String INTERNAL_CALLEE_CONTRACT = "InternalCallee";
private static final String INTERNAL_CALLER_CONTRACT = "InternalCaller";
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/NaturalDispatchOrderingTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/NaturalDispatchOrderingTest.java
index a6418db0ea2e..906f44b67b4b 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/NaturalDispatchOrderingTest.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/NaturalDispatchOrderingTest.java
@@ -18,7 +18,7 @@
import static com.hedera.services.bdd.junit.hedera.NodeSelector.byNodeId;
import static com.hedera.services.bdd.junit.hedera.utils.WorkingDirUtils.guaranteedExtantDir;
-import static com.hedera.services.bdd.junit.support.RecordStreamAccess.RECORD_STREAM_ACCESS;
+import static com.hedera.services.bdd.junit.support.StreamFileAccess.STREAM_FILE_ACCESS;
import static com.hedera.services.bdd.spec.HapiSpec.hapiTest;
import static com.hedera.services.bdd.spec.dsl.operations.transactions.TouchBalancesOperation.touchBalanceOf;
import static com.hedera.services.bdd.spec.keys.TrieSigMapGenerator.uniqueWithFullPrefixesFor;
@@ -30,7 +30,7 @@
import static com.hedera.services.bdd.spec.transactions.token.TokenMovement.moving;
import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.createHollow;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.streamMustIncludeNoFailuresFrom;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.recordStreamMustIncludeNoFailuresFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.visibleNonSyntheticItems;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
import static com.hedera.services.bdd.suites.HapiSuite.DEFAULT_PAYER;
@@ -51,6 +51,7 @@
import static java.util.Objects.requireNonNull;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import com.esaulpaugh.headlong.abi.Function;
import com.hedera.node.app.spi.workflows.HandleContext.TransactionCategory;
@@ -107,7 +108,7 @@ public class NaturalDispatchOrderingTest {
@BeforeAll
static void setUp(@NonNull final TestLifecycle testLifecycle) {
testLifecycle.doAdhoc(withOpContext((spec, opLog) -> {
- unsubscribe = RECORD_STREAM_ACCESS.subscribe(
+ unsubscribe = STREAM_FILE_ACCESS.subscribe(
guaranteedExtantDir(spec.streamsLoc(byNodeId(0))), new StreamDataListener() {});
triggerAndCloseAtLeastOneFile(spec);
}));
@@ -133,7 +134,7 @@ static void cleanUp() {
@DisplayName("reversible user stream items are as expected")
final Stream<DynamicTest> reversibleUserItemsAsExpected() {
return hapiTest(
- streamMustIncludeNoFailuresFrom(
+ recordStreamMustIncludeNoFailuresFrom(
visibleNonSyntheticItems(reversibleUserValidator(), "firstCreation", "duplicateCreation")),
scheduleCreate(
"scheduledTxn",
@@ -172,7 +173,7 @@ final Stream<DynamicTest> reversibleChildAndRemovablePrecedingItemsAsExpected(
@Contract(contract = "LowLevelCall") SpecContract lowLevelCallContract) {
final var transferFunction = new Function("transferNFTThanRevertCall(address,address,address,int64)");
return hapiTest(
- streamMustIncludeNoFailuresFrom(visibleNonSyntheticItems(
+ recordStreamMustIncludeNoFailuresFrom(visibleNonSyntheticItems(
reversibleChildValidator(), "fullSuccess", "containedRevert", "fullRevert")),
nonFungibleToken.treasury().authorizeContract(transferContract),
transferContract
@@ -231,7 +232,7 @@ final Stream<DynamicTest> reversibleScheduleAndRemovablePrecedingItemsAsExpected
@Account(centBalance = 7, maxAutoAssociations = UNLIMITED_AUTO_ASSOCIATION_SLOTS)
SpecAccount insolventPayer) {
return hapiTest(
- streamMustIncludeNoFailuresFrom(
+ recordStreamMustIncludeNoFailuresFrom(
visibleNonSyntheticItems(reversibleScheduleValidator(), "committed", "rolledBack")),
firstToken.treasury().transferUnitsTo(solventPayer, 10, firstToken),
secondToken.treasury().transferUnitsTo(insolventPayer, 10, secondToken),
@@ -281,7 +282,7 @@ final Stream<DynamicTest> removableChildItemsAsExpected(
final var startChainFn = new Function("startChain(bytes)");
final var emptyMessage = new byte[0];
return hapiTest(
- streamMustIncludeNoFailuresFrom(
+ recordStreamMustIncludeNoFailuresFrom(
visibleNonSyntheticItems(removableChildValidator(), "nestedCreations", "revertedCreations")),
outerCreatorContract.call("startChain", emptyMessage).with(txn -> txn.gas(2_000_000)
.via("nestedCreations")),
@@ -315,7 +316,7 @@ final Stream removableChildItemsAsExpected(
@DisplayName("irreversible preceding stream items are as expected")
final Stream<DynamicTest> irreversiblePrecedingItemsAsExpected() {
return hapiTest(
- streamMustIncludeNoFailuresFrom(visibleNonSyntheticItems(
+ recordStreamMustIncludeNoFailuresFrom(visibleNonSyntheticItems(
irreversiblePrecedingValidator(), "finalizationBySuccess", "finalizationByFailure")),
tokenCreate("unassociatedToken"),
// Create two hollow accounts to finalize, first by a top-level success and second by failure
@@ -455,6 +456,9 @@ private static void assertParentChildStructure(
withNonce(userTransactionID, nextExpectedNonce++ - postTriggeredOffset), following.txnId());
assertEquals(userConsensusTime, following.parentConsensusTimestamp());
}
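+ // Receipts of scheduled (triggered) dispatches must still expose an exchange rate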
+ if (following.txnId().getScheduled()) {
+ assertTrue(following.txnRecord().getReceipt().hasExchangeRate());
+ }
}
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/SystemFileExportsTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/SystemFileExportsTest.java
index 8ffad8f00109..ea11adb35e7b 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/SystemFileExportsTest.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip993/SystemFileExportsTest.java
@@ -40,12 +40,12 @@
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.nOps;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overriding;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overridingTwo;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.recordStreamMustIncludeNoFailuresFrom;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.recordStreamMustIncludePassFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.selectedItems;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.simulatePostUpgradeTransaction;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcingContextual;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.streamMustIncludeNoFailuresFrom;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.streamMustIncludePassFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.validateChargedUsdWithin;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.visibleItems;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
@@ -124,7 +124,7 @@ final Stream<DynamicTest> syntheticNodeDetailsUpdateHappensAtUpgradeBoundary() {
};
final AtomicReference