diff --git a/gradle/modules.properties b/gradle/modules.properties
index d99fab07d..cf4195919 100644
--- a/gradle/modules.properties
+++ b/gradle/modules.properties
@@ -7,6 +7,8 @@ com.swirlds.config.processor=com.swirlds:swirlds-config-processor
com.google.auto.service=com.google.auto.service:auto-service-annotations
com.google.auto.service.processor=com.google.auto.service:auto-service
com.google.auto.common=com.google.auto:auto-common
+com.github.spotbugs.annotations=com.github.spotbugs:spotbugs-annotations
+com.lmax.disruptor=com.lmax:disruptor
io.helidon.webserver=io.helidon.webserver:helidon-webserver
io.helidon.webserver.grpc=io.helidon.webserver:helidon-webserver-grpc
io.helidon.webserver.testing.junit5=io.helidon.webserver.testing.junit5:helidon-webserver-testing-junit5
diff --git a/protos/src/main/protobuf/blockstream.proto b/protos/src/main/protobuf/blockstream.proto
index c1d7a425d..5b9f4538c 100644
--- a/protos/src/main/protobuf/blockstream.proto
+++ b/protos/src/main/protobuf/blockstream.proto
@@ -17,80 +17,150 @@ syntax = "proto3";
*/
option java_package = "com.hedera.block.protos";
-option java_outer_classname = "BlockStreamServiceGrpcProto";
+option java_outer_classname = "BlockStreamService";
-/**
- * The BlockStreamGrpc service definition provides 2 bidirectional streaming methods for
- * exchanging blocks with the Block Node server.
- *
- * A producer (e.g. Consensus Node) can use the StreamSink method to stream blocks to the
- * Block Node server. The Block Node server will respond with a BlockResponse message for
- * each block received.
- *
- * A consumer (e.g. Mirror Node) can use the StreamSource method to request a stream of
- * blocks from the server. The consumer is expected to respond with a BlockResponse message
- * with the id of each block received.
- */
-service BlockStreamGrpc {
+service BlockStreamGrpcService {
- /**
- * StreamSink is a bidirectional streaming method that allows a producer to stream blocks
- * to the Block Node server. The server will respond with a BlockResponse message for each
- * block received.
- */
- rpc StreamSink(stream Block) returns (stream BlockResponse) {}
+ rpc publishBlockStream (stream PublishStreamRequest) returns (stream PublishStreamResponse) {}
- /**
- * StreamSource is a bidirectional streaming method that allows a consumer to request a
- * stream of blocks from the server. The consumer is expected to respond with a BlockResponse
- * message with the id of each block received.
- */
- rpc StreamSource(stream BlockResponse) returns (stream Block) {}
+ rpc subscribeBlockStream (SubscribeStreamRequest) returns (stream SubscribeStreamResponse) {}
- rpc GetBlock(Block) returns (Block) {}
+ rpc singleBlock(SingleBlockRequest) returns (SingleBlockResponse) {}
}
-/**
- * A block is a simple message that contains an id and a value.
- * This specification is a simple example meant to expedite development.
- * It will be replaced with a PBJ implementation in the future.
- */
-message Block {
- /**
- * The id of the block. Each block id should be unique.
- */
- int64 id = 1;
+message PublishStreamRequest {
+ BlockItem block_item = 1;
+}
+
+message PublishStreamResponse {
+ oneof response {
+ /**
+ * A response sent for each item and for each block.
+ */
+ ItemAcknowledgement acknowledgement = 1;
+
+ /**
+ * A response sent when a stream ends.
+ */
+ EndOfStream status = 2;
+ }
+
+ message ItemAcknowledgement {
+ bytes item_ack = 1;
+ }
+
+ message EndOfStream {
+ PublishStreamResponseCode status = 1;
+ }
/**
- * The value of the block. The value can be any string.
- */
- string value = 2;
+ * An enumeration indicating the status of this request.
+ *
+ * This enumeration describes the reason a block stream
+     * (sent via `publishBlockStream`) ended.
+ */
+ enum PublishStreamResponseCode {
+ /**
+ * An "unset value" flag, this value SHALL NOT be used.
+ * This status indicates the server software failed to set a
+ * status, and SHALL be considered a software defect.
+ */
+ STREAM_ITEMS_UNKNOWN = 0;
+
+ /**
+ * The request succeeded.
+ * No errors occurred and the source node orderly ended the stream.
+ */
+ STREAM_ITEMS_SUCCESS = 1;
+
+ /**
+ * The delay between items was too long.
+ * The source MUST start a new stream before the failed block.
+ */
+ STREAM_ITEMS_TIMEOUT = 2;
+
+ /**
+ * An item was received out-of-order.
+ * The source MUST start a new stream before the failed block.
+ */
+ STREAM_ITEMS_OUT_OF_ORDER = 3;
+
+ /**
+ * A block state proof item could not be validated.
+ * The source MUST start a new stream before the failed block.
+ */
+ STREAM_ITEMS_BAD_STATE_PROOF = 4;
+ }
}
-/**
- * A block response is a simple message that contains an id.
- * The block response message is simply meant to disambiguate it
- * from the original request. This specification is a simple
- * example meant to expedite development. It will be replaced with
- * a PBJ implementation in the future.
- */
-message BlockResponse {
- /**
- * The id of the block which was received. Each block id should
- * correlate with the id of a Block message id.
- */
- int64 id = 1;
+message SubscribeStreamRequest {
+ uint64 start_block_number = 1;
+}
+
+message SubscribeStreamResponse {
+ oneof response {
+ SubscribeStreamResponseCode status = 1;
+ BlockItem block_item = 2;
+ }
+
+ enum SubscribeStreamResponseCode {
+ READ_STREAM_UNKNOWN = 0;
+ READ_STREAM_INSUFFICIENT_BALANCE = 1;
+ READ_STREAM_SUCCESS = 2;
+ READ_STREAM_INVALID_START_BLOCK_NUMBER = 3;
+ READ_STREAM_INVALID_END_BLOCK_NUMBER = 4;
+ }
+}
+
+message Block {
+ repeated BlockItem block_items = 1;
}
/**
- * A block request is a simple message that contains an id.
+ * A BlockItem is a simple message that contains an item and a value.
* This specification is a simple example meant to expedite development.
* It will be replaced with a PBJ implementation in the future.
*/
-message BlockRequest {
- /**
- * The id of the block which was requested. Each block id should
- * correlate with the id of a Block message id.
- */
- int64 id = 1;
+message BlockItem {
+
+ oneof items {
+ BlockHeader header = 1;
+ EventMetadata start_event = 2;
+ BlockProof state_proof = 3;
+ }
+
+ string value = 4;
+}
+
+message BlockHeader {
+ uint64 block_number = 1;
+}
+
+message EventMetadata {
+ uint64 creator_id = 1;
+}
+
+message BlockProof {
+ uint64 block = 1;
+}
+
+message SingleBlockRequest {
+ uint64 block_number = 1;
+}
+
+message SingleBlockResponse {
+ oneof response {
+ SingleBlockResponseCode status = 1;
+ Block block = 2;
+ }
+
+ enum SingleBlockResponseCode {
+ READ_BLOCK_UNKNOWN = 0;
+ READ_BLOCK_INSUFFICIENT_BALANCE = 1;
+ READ_BLOCK_SUCCESS = 2;
+ READ_BLOCK_NOT_FOUND = 3;
+ READ_BLOCK_NOT_AVAILABLE = 4;
+ }
}
diff --git a/server/docker/Dockerfile b/server/docker/Dockerfile
index 58b93ceca..693784aa1 100644
--- a/server/docker/Dockerfile
+++ b/server/docker/Dockerfile
@@ -1,9 +1,20 @@
# Use Eclipse Temurin with Java 21 as the base image
FROM eclipse-temurin:21
+# Expose the port that the application will run on
+EXPOSE 8080
+
# Define version
ARG VERSION
+# Create a non-root user and group
+ARG UNAME=hedera
+ARG UID=2000
+ARG GID=2000
+RUN groupadd -g $GID -o $UNAME
+RUN useradd -m -u $UID -g $GID -o -s /bin/bash $UNAME
+USER $UNAME
+
# Set the working directory inside the container
WORKDIR /app
@@ -13,8 +24,5 @@ COPY --from=distributions server-${VERSION}.tar .
# Extract the TAR file
RUN tar -xvf server-${VERSION}.tar
-# Expose the port that the application will run on
-EXPOSE 8080
-
# RUN the bin script for starting the server
-ENTRYPOINT ["sh", "-c", "/app/server-${VERSION}/bin/server"]
+ENTRYPOINT ["/bin/bash", "-c", "/app/server-${VERSION}/bin/server"]
diff --git a/server/docker/docker-compose.yml b/server/docker/docker-compose.yml
index a09e6adc4..57740349f 100644
--- a/server/docker/docker-compose.yml
+++ b/server/docker/docker-compose.yml
@@ -6,5 +6,6 @@ services:
env_file:
- .env
ports:
+ - "9999:9999"
- "8080:8080"
- "5005:5005"
diff --git a/server/docs/design/block-persistence.md b/server/docs/design/block-persistence.md
new file mode 100644
index 000000000..ea10f49ac
--- /dev/null
+++ b/server/docs/design/block-persistence.md
@@ -0,0 +1,56 @@
+# Block Persistence
+
+## Purpose
+
+The main objective of the `hedera-block-node` project is to replace the storage of Consensus Node artifacts (e.g.
+Blocks) on cloud storage buckets (e.g. GCS and S3) with a solution managed by the Block Node server. This document aims
+to describe the high-level design of how the Block Node persists and retrieves Blocks and how it handles exception cases
+when they arise.
+
+---
+
+### Goals
+
+1) BlockItems streamed from a producer (e.g. Consensus Node) must be collated and persisted as a Block. Per the
+ specification, a Block is an ordered list of BlockItems. How the Block is persisted is an implementation detail.
+2) A Block must be efficiently retrieved by block number.
+
+---
+
+### Terms
+
+**BlockItem** - A BlockItem is the primary data structure passed between the producer, the `hedera-block-node`
+and consumers. The BlockItem description and protobuf definition are maintained in the `hedera-protobuf`
+[project](https://github.com/hashgraph/hedera-protobufs/blob/continue-block-node/documents/api/block/stream/block_item.md).
+
+**Block** - A Block is the base element of the block stream at rest. At present, it consists of an ordered collection of
+BlockItems. The Block description and protobuf definition are maintained in the `hedera-protobuf`
+[project](https://github.com/hashgraph/hedera-protobufs/blob/continue-block-node/documents/api/block/stream/block.md).
+
+---
+
+### Entities
+
+**BlockReader** - An interface defining methods used to read a Block from storage. It represents a lower-level
+component whose implementation is directly responsible for reading a Block from storage.
+
+**BlockWriter** - An interface defining methods used to write BlockItems to storage. It represents a lower-level
+component whose implementation is directly responsible for writing a BlockItem to storage as a Block.
+
+**BlockRemover** - An interface defining the methods used to remove a Block from storage. It represents a lower-level
+component whose implementation is directly responsible for removing a Block from storage.
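+
+A minimal sketch of how these interfaces might be shaped is shown below. The `read` signature mirrors how
+`BlockStreamService` uses a `BlockReader` elsewhere in this change set; the `BlockWriter` and `BlockRemover`
+signatures are illustrative assumptions only (each interface would live in its own file).
+
+```java
+import java.io.IOException;
+import java.util.Optional;
+
+public interface BlockReader<V> {
+    // Fetch a previously persisted Block by its block number, if present.
+    Optional<V> read(final long blockNumber) throws IOException;
+}
+
+public interface BlockWriter<V> {
+    // Persist a single BlockItem; implementations collate related items into a Block.
+    void write(final V blockItem) throws IOException;
+}
+
+public interface BlockRemover {
+    // Remove a persisted Block from storage by its block number.
+    void remove(final long blockNumber) throws IOException;
+}
+```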
+
+---
+
+### Design
+
+The design for `Block` persistence is fairly straightforward. Block server objects should use the persistence entity
+interfaces to read, write and remove `Block`s from storage. `BlockItem`s streamed from a producer are read off the wire
+one by one and passed to an implementation of `BlockWriter`. The `BlockWriter` is responsible for collecting related
+`BlockItem`s into a `Block` and persisting the `Block` to storage in a way that is efficient for retrieval at a later
+time. The `BlockWriter` is also responsible for removing a partially written `Block` if an exception occurs while
+writing it. For example, if half the `BlockItem`s of a `Block` are written when an IOException occurs, the `BlockWriter`
+should remove all the `BlockItem`s of the partially written `Block` and pass the exception up to the caller. Services
+requiring one or more `Block`s should leverage a `BlockReader` implementation. The `BlockReader` should be able to
+efficiently retrieve a `Block` by block number. The `BlockReader` should pass unrecoverable exceptions encountered
+while reading a `Block` up to the caller.
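+
+The sketch below illustrates the partial-write cleanup rule described above. It is not the project's
+`BlockAsDirWriter` implementation; it assumes a block-per-directory layout, reuses the `.blk` extension from
+`Constants`, and the `removeRecursively` helper and class name are hypothetical.
+
+```java
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Comparator;
+import java.util.stream.Stream;
+
+public class SketchBlockWriter {
+
+    private final Path rootPath; // resolved from blocknode.storage.root.path
+    private Path currentBlockDir; // directory holding the Block currently being written
+    private long itemIndex;
+
+    public SketchBlockWriter(final Path rootPath) {
+        this.rootPath = rootPath;
+    }
+
+    public void write(final BlockItem blockItem) throws IOException {
+        // A BlockHeader marks the start of a new Block.
+        if (blockItem.hasHeader()) {
+            currentBlockDir =
+                    rootPath.resolve(String.valueOf(blockItem.getHeader().getBlockNumber()));
+            Files.createDirectories(currentBlockDir);
+            itemIndex = 0;
+        }
+        try {
+            final Path itemFile = currentBlockDir.resolve(itemIndex++ + ".blk");
+            Files.write(itemFile, blockItem.toByteArray());
+        } catch (final IOException e) {
+            // Remove the partially written Block before passing the exception up to the caller.
+            removeRecursively(currentBlockDir);
+            throw e;
+        }
+    }
+
+    private static void removeRecursively(final Path dir) throws IOException {
+        try (final Stream<Path> paths = Files.walk(dir)) {
+            paths.sorted(Comparator.reverseOrder()).forEach(path -> path.toFile().delete());
+        }
+    }
+}
+```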
diff --git a/server/src/main/java/com/hedera/block/server/BlockStreamService.java b/server/src/main/java/com/hedera/block/server/BlockStreamService.java
index 7b40f4034..77fb3b3e0 100644
--- a/server/src/main/java/com/hedera/block/server/BlockStreamService.java
+++ b/server/src/main/java/com/hedera/block/server/BlockStreamService.java
@@ -16,70 +16,82 @@
package com.hedera.block.server;
+import static com.hedera.block.protos.BlockStreamService.*;
import static com.hedera.block.server.Constants.*;
-import static io.helidon.webserver.grpc.ResponseHelper.complete;
import com.google.protobuf.Descriptors;
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.consumer.LiveStreamObserver;
-import com.hedera.block.server.consumer.LiveStreamObserverImpl;
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.consumer.ConsumerStreamResponseObserver;
+import com.hedera.block.server.data.ObjectEvent;
import com.hedera.block.server.mediator.StreamMediator;
-import com.hedera.block.server.persistence.BlockPersistenceHandler;
-import com.hedera.block.server.producer.ProducerBlockStreamObserver;
+import com.hedera.block.server.metrics.MetricsService;
+import com.hedera.block.server.persistence.storage.read.BlockReader;
+import com.hedera.block.server.producer.ItemAckBuilder;
+import com.hedera.block.server.producer.ProducerBlockItemObserver;
+import edu.umd.cs.findbugs.annotations.NonNull;
import io.grpc.stub.StreamObserver;
import io.helidon.webserver.grpc.GrpcService;
+import java.io.IOException;
import java.time.Clock;
import java.util.Optional;
/**
- * This class implements the GrpcService interface and provides the functionality for the
- * BlockStreamService. It sets up the bidirectional streaming methods for the service and handles
- * the routing for these methods. It also initializes the StreamMediator, BlockStorage, and
- * BlockCache upon creation.
- *
- *
- * <p>The class provides two main methods, streamSink and streamSource, which handle the client and
- * server streaming respectively. These methods return custom StreamObservers which are used to
- * observe and respond to the streams.
+ * The BlockStreamService class defines the gRPC service for the block stream service. It provides
+ * the implementation for the bidirectional streaming, server streaming, and unary methods defined
+ * in the proto file.
*/
public class BlockStreamService implements GrpcService {
private final System.Logger LOGGER = System.getLogger(getClass().getName());
private final long timeoutThresholdMillis;
- private final StreamMediator<
- BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse>
- streamMediator;
-    private final BlockPersistenceHandler<BlockStreamServiceGrpcProto.Block>
- blockPersistenceHandler;
+ private final ItemAckBuilder itemAckBuilder;
+    private final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator;
+ private final ServiceStatus serviceStatus;
+    private final BlockReader<Block> blockReader;
+ private final BlockNodeContext blockNodeContext;
/**
- * Constructor for the BlockStreamService class.
+ * Constructor for the BlockStreamService class. It initializes the BlockStreamService with the
+ * given parameters.
*
- * @param timeoutThresholdMillis the timeout threshold in milliseconds
- * @param streamMediator the stream mediator
+ * @param timeoutThresholdMillis the timeout threshold in milliseconds for the producer to
+ * publish block items
+ * @param itemAckBuilder the item acknowledgement builder to send responses back to the producer
+ * @param streamMediator the stream mediator to proxy block items from the producer to the
+ * subscribers and manage the subscription lifecycle for subscribers
+ * @param blockReader the block reader to fetch blocks from storage for unary singleBlock
+ * service calls
+ * @param serviceStatus the service status provides methods to check service availability and to
+ * stop the service and web server in the event of an unrecoverable exception
*/
- public BlockStreamService(
+ BlockStreamService(
final long timeoutThresholdMillis,
- final StreamMediator<
- BlockStreamServiceGrpcProto.Block,
- BlockStreamServiceGrpcProto.BlockResponse>
- streamMediator,
-            final BlockPersistenceHandler<BlockStreamServiceGrpcProto.Block>
- blockPersistenceHandler) {
-
+ @NonNull final ItemAckBuilder itemAckBuilder,
+ @NonNull
+                    final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>>
+ streamMediator,
+            @NonNull final BlockReader<Block> blockReader,
+ @NonNull final ServiceStatus serviceStatus,
+ @NonNull final BlockNodeContext blockNodeContext) {
this.timeoutThresholdMillis = timeoutThresholdMillis;
+ this.itemAckBuilder = itemAckBuilder;
this.streamMediator = streamMediator;
- this.blockPersistenceHandler = blockPersistenceHandler;
+ this.blockReader = blockReader;
+ this.serviceStatus = serviceStatus;
+ this.blockNodeContext = blockNodeContext;
}
/**
- * Returns the FileDescriptor for the BlockStreamServiceGrpcProto.
+ * Returns the proto descriptor for the BlockStreamService. This descriptor corresponds to the
+ * proto file for the BlockStreamService.
*
- * @return the FileDescriptor for the BlockStreamServiceGrpcProto
+ * @return the proto descriptor for the BlockStreamService
*/
+ @NonNull
@Override
public Descriptors.FileDescriptor proto() {
- return BlockStreamServiceGrpcProto.getDescriptor();
+ return com.hedera.block.protos.BlockStreamService.getDescriptor();
}
/**
@@ -88,89 +100,225 @@ public Descriptors.FileDescriptor proto() {
*
* @return the service name corresponding to the service name in the proto file
*/
+ @NonNull
@Override
public String serviceName() {
return SERVICE_NAME;
}
/**
- * Updates the routing for the BlockStreamService. It sets up the bidirectional streaming
- * methods for the service.
+ * Updates the routing definitions for the BlockStreamService. It establishes the bidirectional
+ * streaming method for publishBlockStream, server streaming method for subscribeBlockStream and
+ * a unary method for singleBlock.
*
* @param routing the routing for the BlockStreamService
*/
@Override
- public void update(final Routing routing) {
- routing.bidi(CLIENT_STREAMING_METHOD_NAME, this::streamSink);
- routing.bidi(SERVER_STREAMING_METHOD_NAME, this::streamSource);
- routing.unary(GET_BLOCK_METHOD_NAME, this::getBlock);
+ public void update(@NonNull final Routing routing) {
+ routing.bidi(CLIENT_STREAMING_METHOD_NAME, this::publishBlockStream);
+ routing.serverStream(SERVER_STREAMING_METHOD_NAME, this::subscribeBlockStream);
+ routing.unary(SINGLE_BLOCK_METHOD_NAME, this::singleBlock);
}
- /**
- * The streamSink method is called by Helidon each time a producer initiates a bidirectional
- * stream.
- *
- * @param responseStreamObserver Helidon provides a StreamObserver to handle responses back to
- * the producer.
- * @return a custom StreamObserver to handle streaming blocks from the producer to all
- * subscribed consumer via the streamMediator as well as sending responses back to the
- * producer.
- */
-    private StreamObserver<BlockStreamServiceGrpcProto.Block> streamSink(
-            final StreamObserver<BlockStreamServiceGrpcProto.BlockResponse>
- responseStreamObserver) {
- LOGGER.log(System.Logger.Level.DEBUG, "Executing bidirectional streamSink method");
+    StreamObserver<PublishStreamRequest> publishBlockStream(
+            @NonNull final StreamObserver<PublishStreamResponse> publishStreamResponseObserver) {
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Executing bidirectional publishBlockStream gRPC method");
- return new ProducerBlockStreamObserver(streamMediator, responseStreamObserver);
+ return new ProducerBlockItemObserver(
+ streamMediator, publishStreamResponseObserver, itemAckBuilder, serviceStatus);
}
- /**
- * The streamSource method is called by Helidon each time a consumer initiates a bidirectional
- * stream.
- *
- * @param responseStreamObserver Helidon provides a StreamObserver to handle responses from the
- * consumer back to the server.
- * @return a custom StreamObserver to handle streaming blocks from the producer to the consumer
- * as well as handling responses from the consumer.
- */
-    private StreamObserver<BlockStreamServiceGrpcProto.BlockResponse> streamSource(
-            final StreamObserver<BlockStreamServiceGrpcProto.Block> responseStreamObserver) {
- LOGGER.log(System.Logger.Level.DEBUG, "Executing bidirectional streamSource method");
+ void subscribeBlockStream(
+ @NonNull final SubscribeStreamRequest subscribeStreamRequest,
+ @NonNull
+                    final StreamObserver<SubscribeStreamResponse> subscribeStreamResponseObserver) {
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Executing Server Streaming subscribeBlockStream gRPC Service");
// Return a custom StreamObserver to handle streaming blocks from the producer.
- final LiveStreamObserver<
- BlockStreamServiceGrpcProto.Block,
- BlockStreamServiceGrpcProto.BlockResponse>
- streamObserver =
- new LiveStreamObserverImpl(
- timeoutThresholdMillis,
- Clock.systemDefaultZone(),
- Clock.systemDefaultZone(),
- streamMediator,
- responseStreamObserver);
-
- // Subscribe the observer to the mediator
- streamMediator.subscribe(streamObserver);
-
- return streamObserver;
+ if (serviceStatus.isRunning()) {
+ @NonNull
+ final var streamObserver =
+ new ConsumerStreamResponseObserver(
+ timeoutThresholdMillis,
+ Clock.systemDefaultZone(),
+ streamMediator,
+ subscribeStreamResponseObserver);
+
+ streamMediator.subscribe(streamObserver);
+ } else {
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Server Streaming subscribeBlockStream gRPC Service is not currently running");
+
+ subscribeStreamResponseObserver.onNext(buildSubscribeStreamNotAvailableResponse());
+ }
}
- void getBlock(
- BlockStreamServiceGrpcProto.Block block,
-            StreamObserver<BlockStreamServiceGrpcProto.Block> responseObserver) {
- LOGGER.log(System.Logger.Level.INFO, "GetBlock request received");
-        final Optional<BlockStreamServiceGrpcProto.Block> responseBlock =
- blockPersistenceHandler.read(block.getId());
- if (responseBlock.isPresent()) {
- LOGGER.log(System.Logger.Level.INFO, "Returning block with id: {0}", block.getId());
- complete(responseObserver, responseBlock.get());
+ /*
+ public static class SingleBlockUnaryMethod implements GrpcService.Routing.UnaryMethod {
+ // implements ServerCalls.UnaryMethod<
+    //          SingleBlockRequest, StreamObserver<SingleBlockResponse>> {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+        private final BlockReader<Block> blockReader;
+ private final ServiceStatus serviceStatus;
+ private final BlockNodeContext blockNodeContext;
+
+        private SingleBlockUnaryMethod(@NonNull final BlockReader<Block> blockReader,
+ @NonNull final ServiceStatus serviceStatus,
+ @NonNull final BlockNodeContext blockNodeContext) {
+ this.blockReader = blockReader;
+ this.serviceStatus = serviceStatus;
+ this.blockNodeContext = blockNodeContext;
+ }
+
+ @Override
+ public void afterClose() {
+ LOGGER.log(System.Logger.Level.DEBUG, "Unary singleBlock gRPC method closed");
+ }
+
+ @Override
+ public void invoke(
+ SingleBlockRequest singleBlockRequest,
+                StreamObserver<SingleBlockResponse> singleBlockResponseStreamObserver) {
+
+ LOGGER.log(System.Logger.Level.DEBUG, "Executing Unary singleBlock gRPC method");
+
+ if (serviceStatus.isRunning()) {
+ final long blockNumber = singleBlockRequest.getBlockNumber();
+ try {
+                @NonNull final Optional<Block> blockOpt = blockReader.read(blockNumber);
+ if (blockOpt.isPresent()) {
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Successfully returning block number: {0}",
+ blockNumber);
+ singleBlockResponseStreamObserver.onNext(
+ buildSingleBlockResponse(blockOpt.get()));
+
+ @NonNull
+ final MetricsService metricsService = blockNodeContext.metricsService();
+ metricsService.singleBlockRetrievedCounter.increment();
+ } else {
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Block number {0} not found",
+ blockNumber);
+ singleBlockResponseStreamObserver.onNext(
+ buildSingleBlockNotFoundResponse());
+ }
+ } catch (IOException e) {
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Error reading block number: {0}",
+ blockNumber);
+ singleBlockResponseStreamObserver.onNext(
+ buildSingleBlockNotAvailableResponse());
+ }
+ } else {
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Unary singleBlock gRPC method is not currently running");
+ singleBlockResponseStreamObserver.onNext(buildSingleBlockNotAvailableResponse());
+ }
+
+ // Send the response
+ singleBlockResponseStreamObserver.onCompleted();
+ }
+
+ @NonNull
+ static StreamObserver buildSingleBlockNotAvailableResponse() {
+ return SingleBlockResponse.newBuilder()
+ .setStatus(SingleBlockResponse.SingleBlockResponseCode.READ_BLOCK_NOT_AVAILABLE)
+ .build();
+ }
+
+ @NonNull
+ static StreamObserver buildSingleBlockNotFoundResponse() {
+ return SingleBlockResponse.newBuilder()
+ .setStatus(SingleBlockResponse.SingleBlockResponseCode.READ_BLOCK_NOT_FOUND)
+ .build();
+ }
+
+ @NonNull
+ private static StreamObserver buildSingleBlockResponse(@NonNull final Block block) {
+ return SingleBlockResponse.newBuilder().setBlock(block).build();
+ }
+ }
+ */
+
+ void singleBlock(
+ @NonNull final SingleBlockRequest singleBlockRequest,
+            @NonNull final StreamObserver<SingleBlockResponse> singleBlockResponseStreamObserver) {
+
+ LOGGER.log(System.Logger.Level.DEBUG, "Executing Unary singleBlock gRPC method");
+
+ if (serviceStatus.isRunning()) {
+ final long blockNumber = singleBlockRequest.getBlockNumber();
+ try {
+                @NonNull final Optional<Block> blockOpt = blockReader.read(blockNumber);
+ if (blockOpt.isPresent()) {
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Successfully returning block number: {0}",
+ blockNumber);
+ singleBlockResponseStreamObserver.onNext(
+ buildSingleBlockResponse(blockOpt.get()));
+
+ @NonNull
+ final MetricsService metricsService = blockNodeContext.metricsService();
+ metricsService.singleBlocksRetrieved.increment();
+ } else {
+ LOGGER.log(
+ System.Logger.Level.DEBUG, "Block number {0} not found", blockNumber);
+ singleBlockResponseStreamObserver.onNext(buildSingleBlockNotFoundResponse());
+ }
+ } catch (IOException e) {
+ LOGGER.log(
+ System.Logger.Level.ERROR, "Error reading block number: {0}", blockNumber);
+ singleBlockResponseStreamObserver.onNext(buildSingleBlockNotAvailableResponse());
+ }
} else {
LOGGER.log(
- System.Logger.Level.INFO,
- "Did not find your block with id: {0}",
- block.getId());
- responseObserver.onNext(
- BlockStreamServiceGrpcProto.Block.newBuilder().setId(0).build());
+ System.Logger.Level.ERROR,
+ "Unary singleBlock gRPC method is not currently running");
+ singleBlockResponseStreamObserver.onNext(buildSingleBlockNotAvailableResponse());
}
+
+ // Send the response
+ singleBlockResponseStreamObserver.onCompleted();
+ }
+
+ // TODO: Fix this error type once it's been standardized in `hedera-protobufs`
+ // this should not be success
+ @NonNull
+ static SubscribeStreamResponse buildSubscribeStreamNotAvailableResponse() {
+ return SubscribeStreamResponse.newBuilder()
+ .setStatus(SubscribeStreamResponse.SubscribeStreamResponseCode.READ_STREAM_SUCCESS)
+ .build();
+ }
+
+ @NonNull
+ static SingleBlockResponse buildSingleBlockNotAvailableResponse() {
+ return SingleBlockResponse.newBuilder()
+ .setStatus(SingleBlockResponse.SingleBlockResponseCode.READ_BLOCK_NOT_AVAILABLE)
+ .build();
+ }
+
+ @NonNull
+ static SingleBlockResponse buildSingleBlockNotFoundResponse() {
+ return SingleBlockResponse.newBuilder()
+ .setStatus(SingleBlockResponse.SingleBlockResponseCode.READ_BLOCK_NOT_FOUND)
+ .build();
+ }
+
+ @NonNull
+ private static SingleBlockResponse buildSingleBlockResponse(@NonNull final Block block) {
+ return SingleBlockResponse.newBuilder().setBlock(block).build();
}
}
diff --git a/server/src/main/java/com/hedera/block/server/Constants.java b/server/src/main/java/com/hedera/block/server/Constants.java
index a48d3a4b7..65eaf30ce 100644
--- a/server/src/main/java/com/hedera/block/server/Constants.java
+++ b/server/src/main/java/com/hedera/block/server/Constants.java
@@ -16,18 +16,33 @@
package com.hedera.block.server;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
/** Constants used in the BlockNode service. */
public final class Constants {
private Constants() {}
- // Config Constants
+ /** Constant mapped to the root path config key where the block files are stored */
+ @NonNull
public static final String BLOCKNODE_STORAGE_ROOT_PATH_KEY = "blocknode.storage.root.path";
+
+ /** Constant mapped to the timeout for stream consumers in milliseconds */
+ @NonNull
public static final String BLOCKNODE_SERVER_CONSUMER_TIMEOUT_THRESHOLD_KEY =
"blocknode.server.consumer.timeout.threshold";
- // Constants specified in the service definition of the .proto file
- public static final String SERVICE_NAME = "BlockStreamGrpc";
- public static final String CLIENT_STREAMING_METHOD_NAME = "StreamSink";
- public static final String SERVER_STREAMING_METHOD_NAME = "StreamSource";
- public static final String GET_BLOCK_METHOD_NAME = "GetBlock";
+ /** Constant mapped to the name of the service in the .proto file */
+ @NonNull public static final String SERVICE_NAME = "BlockStreamGrpcService";
+
+ /** Constant mapped to the publishBlockStream service method name in the .proto file */
+ @NonNull public static final String CLIENT_STREAMING_METHOD_NAME = "publishBlockStream";
+
+ /** Constant mapped to the subscribeBlockStream service method name in the .proto file */
+ @NonNull public static final String SERVER_STREAMING_METHOD_NAME = "subscribeBlockStream";
+
+ /** Constant mapped to the singleBlock service method name in the .proto file */
+ @NonNull public static final String SINGLE_BLOCK_METHOD_NAME = "singleBlock";
+
+ /** Constant defining the block file extension */
+ @NonNull public static final String BLOCK_FILE_EXTENSION = ".blk";
}
diff --git a/server/src/main/java/com/hedera/block/server/Server.java b/server/src/main/java/com/hedera/block/server/Server.java
index 2a84f097c..0c392c595 100644
--- a/server/src/main/java/com/hedera/block/server/Server.java
+++ b/server/src/main/java/com/hedera/block/server/Server.java
@@ -16,38 +16,30 @@
package com.hedera.block.server;
-import static com.hedera.block.server.Constants.*;
+import static com.hedera.block.protos.BlockStreamService.*;
+import static com.hedera.block.server.Constants.BLOCKNODE_SERVER_CONSUMER_TIMEOUT_THRESHOLD_KEY;
+import static com.hedera.block.server.Constants.BLOCKNODE_STORAGE_ROOT_PATH_KEY;
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
import com.hedera.block.server.config.BlockNodeContext;
import com.hedera.block.server.config.BlockNodeContextFactory;
-import com.hedera.block.server.mediator.LiveStreamMediatorImpl;
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.mediator.LiveStreamMediatorBuilder;
+import com.hedera.block.server.mediator.StreamMediator;
import com.hedera.block.server.metrics.MetricsService;
-import com.hedera.block.server.persistence.WriteThroughCacheHandler;
-import com.hedera.block.server.persistence.storage.BlockStorage;
-import com.hedera.block.server.persistence.storage.FileSystemBlockStorage;
-import io.grpc.stub.ServerCalls;
-import io.grpc.stub.StreamObserver;
+import com.hedera.block.server.persistence.storage.read.BlockAsDirReaderBuilder;
+import com.hedera.block.server.persistence.storage.read.BlockReader;
+import com.hedera.block.server.persistence.storage.write.BlockAsDirWriterBuilder;
+import com.hedera.block.server.persistence.storage.write.BlockWriter;
+import com.hedera.block.server.producer.ItemAckBuilder;
+import edu.umd.cs.findbugs.annotations.NonNull;
import io.helidon.config.Config;
import io.helidon.webserver.WebServer;
import io.helidon.webserver.grpc.GrpcRouting;
import java.io.IOException;
-import java.util.stream.Stream;
/** Main class for the block node server */
public class Server {
- // Function stubs to satisfy the bidi routing param signatures. The implementations are in the
- // service class.
- private static ServerCalls.BidiStreamingMethod<
-                    Stream<BlockStreamServiceGrpcProto.Block>,
-                    StreamObserver<BlockStreamServiceGrpcProto.BlockResponse>>
- clientBidiStreamingMethod;
- private static ServerCalls.BidiStreamingMethod<
-                    Stream<BlockStreamServiceGrpcProto.BlockResponse>,
-                    StreamObserver<BlockStreamServiceGrpcProto.Block>>
- serverBidiStreamingMethod;
-
private static final System.Logger LOGGER = System.getLogger(Server.class.getName());
private Server() {}
@@ -55,69 +47,86 @@ private Server() {}
/**
* Main entrypoint for the block node server
*
- * @param args Command line arguments. Not used at present,
+ * @param args Command line arguments. Not used at present.
*/
public static void main(final String[] args) {
+ LOGGER.log(System.Logger.Level.INFO, "Starting BlockNode Server");
+
try {
// init metrics
- BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ @NonNull final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
// increase by 1 just for the sake of an example
- MetricsService metricsService = blockNodeContext.metricsService();
+ @NonNull final MetricsService metricsService = blockNodeContext.metricsService();
metricsService.exampleCounter.increment();
// Set the global configuration
- final Config config = Config.create();
+ @NonNull final Config config = Config.create();
Config.global(config);
- // Get Timeout threshold from configuration
- final long consumerTimeoutThreshold =
- config.get(BLOCKNODE_SERVER_CONSUMER_TIMEOUT_THRESHOLD_KEY)
- .asLong()
- .orElse(1500L);
+ @NonNull final ServiceStatus serviceStatus = new ServiceStatusImpl();
+
+ @NonNull
+            final BlockWriter<BlockItem> blockWriter =
+ BlockAsDirWriterBuilder.newBuilder(
+ BLOCKNODE_STORAGE_ROOT_PATH_KEY, config, blockNodeContext)
+ .build();
+ @NonNull
+            final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, serviceStatus)
+ .build();
+
+ @NonNull
+            final BlockReader<Block> blockReader =
+ BlockAsDirReaderBuilder.newBuilder(BLOCKNODE_STORAGE_ROOT_PATH_KEY, config)
+ .build();
+
+ @NonNull
+ final BlockStreamService blockStreamService =
+ buildBlockStreamService(
+ config, streamMediator, blockReader, serviceStatus, blockNodeContext);
+
+ @NonNull
+ final GrpcRouting.Builder grpcRouting =
+ GrpcRouting.builder().service(blockStreamService);
- // Initialize the block storage, cache, and service
- final BlockStorage blockStorage =
- new FileSystemBlockStorage(BLOCKNODE_STORAGE_ROOT_PATH_KEY, config);
+ // Build the web server
+ @NonNull
+ final WebServer webServer =
+ WebServer.builder().port(8080).addRouting(grpcRouting).build();
- // Initialize blockStreamService with Live Stream and Cache
- final BlockStreamService blockStreamService =
- new BlockStreamService(
- consumerTimeoutThreshold,
- new LiveStreamMediatorImpl(new WriteThroughCacheHandler(blockStorage)),
- new WriteThroughCacheHandler(blockStorage));
+ // Update the serviceStatus with the web server
+ serviceStatus.setWebServer(webServer);
// Start the web server
- WebServer.builder()
- .port(8080)
- .addRouting(
- GrpcRouting.builder()
- .service(blockStreamService)
- .bidi(
- BlockStreamServiceGrpcProto.getDescriptor(),
- SERVICE_NAME,
- CLIENT_STREAMING_METHOD_NAME,
- clientBidiStreamingMethod)
- .bidi(
- BlockStreamServiceGrpcProto.getDescriptor(),
- SERVICE_NAME,
- SERVER_STREAMING_METHOD_NAME,
- serverBidiStreamingMethod)
- .unary(
- BlockStreamServiceGrpcProto.getDescriptor(),
- SERVICE_NAME,
- GET_BLOCK_METHOD_NAME,
- Server::grpcGetBlock))
- .build()
- .start();
+ webServer.start();
} catch (IOException e) {
- LOGGER.log(System.Logger.Level.ERROR, "An exception was thrown starting the server", e);
throw new RuntimeException(e);
}
}
- static void grpcGetBlock(
- BlockStreamServiceGrpcProto.BlockRequest request,
-            StreamObserver<BlockStreamServiceGrpcProto.Block> responseObserver) {}
+ @NonNull
+ private static BlockStreamService buildBlockStreamService(
+ @NonNull final Config config,
+ @NonNull
+                    final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>>
+ streamMediator,
+            @NonNull final BlockReader<Block> blockReader,
+ @NonNull final ServiceStatus serviceStatus,
+ @NonNull final BlockNodeContext blockNodeContext) {
+
+ // Get Timeout threshold from configuration
+ final long consumerTimeoutThreshold =
+ config.get(BLOCKNODE_SERVER_CONSUMER_TIMEOUT_THRESHOLD_KEY).asLong().orElse(1500L);
+
+ return new BlockStreamService(
+ consumerTimeoutThreshold,
+ new ItemAckBuilder(),
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+ }
}
diff --git a/server/src/main/java/com/hedera/block/server/ServiceStatus.java b/server/src/main/java/com/hedera/block/server/ServiceStatus.java
new file mode 100644
index 000000000..1dc330d62
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/ServiceStatus.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.helidon.webserver.WebServer;
+
+/**
+ * The ServiceStatus interface defines the contract for checking the status of the service and
+ * shutting down the web server.
+ */
+public interface ServiceStatus {
+
+ /**
+ * Checks if the service is running.
+ *
+ * @return true if the service is running, false otherwise
+ */
+ boolean isRunning();
+
+ /**
+ * Sets the running status of the service.
+ *
+ * @param running true if the service is running, false otherwise
+ */
+ void setRunning(final boolean running);
+
+ /**
+ * Sets the web server instance.
+ *
+ * @param webServer the web server instance
+ */
+ void setWebServer(@NonNull final WebServer webServer);
+
+ /**
+ * Stops the service and web server. This method is called to shut down the service and the web
+ * server in the event of an error or when the service needs to restart.
+ */
+ void stopWebServer();
+}
diff --git a/server/src/main/java/com/hedera/block/server/ServiceStatusImpl.java b/server/src/main/java/com/hedera/block/server/ServiceStatusImpl.java
new file mode 100644
index 000000000..ac9b869e2
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/ServiceStatusImpl.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.helidon.webserver.WebServer;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * The ServiceStatusImpl class implements the ServiceStatus interface. It provides the
+ * implementation for checking the status of the service and shutting down the web server.
+ */
+public class ServiceStatusImpl implements ServiceStatus {
+
+ private final AtomicBoolean isRunning = new AtomicBoolean(true);
+ private WebServer webServer;
+
+ /** Constructor for the ServiceStatusImpl class. */
+ public ServiceStatusImpl() {}
+
+ /**
+ * Checks if the service is running.
+ *
+ * @return true if the service is running, false otherwise
+ */
+ public boolean isRunning() {
+ return isRunning.get();
+ }
+
+ /**
+ * Sets the running status of the service.
+ *
+ * @param running true if the service is running, false otherwise
+ */
+ public void setRunning(final boolean running) {
+ isRunning.set(running);
+ }
+
+ /**
+ * Sets the web server instance.
+ *
+ * @param webServer the web server instance
+ */
+ public void setWebServer(@NonNull final WebServer webServer) {
+ this.webServer = webServer;
+ }
+
+ /**
+ * Stops the service and web server. This method is called to shut down the service and the web
+ * server in the event of an unrecoverable exception or during expected maintenance.
+ */
+ public void stopWebServer() {
+
+ // Flag the service to stop
+ // accepting new connections
+ isRunning.set(false);
+
+ // Stop the web server
+ webServer.stop();
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/config/BlockNodeContextFactory.java b/server/src/main/java/com/hedera/block/server/config/BlockNodeContextFactory.java
index 1e400771c..b47098bef 100644
--- a/server/src/main/java/com/hedera/block/server/config/BlockNodeContextFactory.java
+++ b/server/src/main/java/com/hedera/block/server/config/BlockNodeContextFactory.java
@@ -36,8 +36,12 @@ public class BlockNodeContextFactory {
private BlockNodeContextFactory() {}
/**
+ * Use the create method to build a singleton block node context to manage system-wide metrics.
+ *
* @return an instance of {@link BlockNodeContext} which holds {@link Configuration}, {@link
* Metrics} and {@link MetricsService} for the rest of the application to use.
+ * @throws IOException when the java libraries fail to read information from a configuration
+ * source.
*/
public static BlockNodeContext create() throws IOException {
final Configuration configuration = getConfiguration();
diff --git a/server/src/main/java/com/hedera/block/server/consumer/ConsumerStreamResponseObserver.java b/server/src/main/java/com/hedera/block/server/consumer/ConsumerStreamResponseObserver.java
new file mode 100644
index 000000000..6beec8878
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/consumer/ConsumerStreamResponseObserver.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.consumer;
+
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+import static com.hedera.block.protos.BlockStreamService.SubscribeStreamResponse;
+
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.mediator.SubscriptionHandler;
+import com.lmax.disruptor.EventHandler;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.grpc.stub.ServerCallStreamObserver;
+import io.grpc.stub.StreamObserver;
+import java.time.InstantSource;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * The ConsumerStreamResponseObserver class is the primary integration point between the LMAX Disruptor
+ * and an instance of a downstream consumer (represented by subscribeStreamResponseObserver provided
+ * by Helidon). The ConsumerStreamResponseObserver implements the EventHandler interface so the Disruptor
+ * can invoke the onEvent() method when a new SubscribeStreamResponse is available.
+ */
+public class ConsumerStreamResponseObserver
+        implements EventHandler<ObjectEvent<SubscribeStreamResponse>> {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+    private final StreamObserver<SubscribeStreamResponse> subscribeStreamResponseObserver;
+    private final SubscriptionHandler<ObjectEvent<SubscribeStreamResponse>> subscriptionHandler;
+
+ private final long timeoutThresholdMillis;
+ private final InstantSource producerLivenessClock;
+ private long producerLivenessMillis;
+
+ private boolean streamStarted;
+ private final AtomicBoolean isResponsePermitted = new AtomicBoolean(true);
+
+ /**
+ * The onCancel handler to execute when the consumer cancels the stream. This handler is
+ * protected to facilitate testing.
+ */
+ protected Runnable onCancel;
+
+ /**
+ * The onClose handler to execute when the consumer closes the stream. This handler is protected
+ * to facilitate testing.
+ */
+ protected Runnable onClose;
+
+ /**
+     * Constructor for the ConsumerStreamResponseObserver class. It is responsible for observing the
+ * SubscribeStreamResponse events from the Disruptor and passing them to the downstream consumer
+ * via the subscribeStreamResponseObserver.
+ *
+ * @param timeoutThresholdMillis the timeout threshold in milliseconds for the producer to
+ * publish block items
+ * @param producerLivenessClock the clock to use to determine the producer liveness
+ * @param subscriptionHandler the subscription handler to use to manage the subscription
+ * lifecycle
+ * @param subscribeStreamResponseObserver the observer to use to send responses to the consumer
+ */
+ public ConsumerStreamResponseObserver(
+ final long timeoutThresholdMillis,
+ @NonNull final InstantSource producerLivenessClock,
+ @NonNull
+                    final SubscriptionHandler<ObjectEvent<SubscribeStreamResponse>>
+ subscriptionHandler,
+ @NonNull
+                    final StreamObserver<SubscribeStreamResponse> subscribeStreamResponseObserver) {
+
+ this.timeoutThresholdMillis = timeoutThresholdMillis;
+ this.subscriptionHandler = subscriptionHandler;
+
+ // The ServerCallStreamObserver can be configured with Runnable handlers to
+ // be executed when a downstream consumer closes the connection. The handlers
+ // unsubscribe this observer.
+ if (subscribeStreamResponseObserver
+ instanceof
+                ServerCallStreamObserver<SubscribeStreamResponse>
+ serverCallStreamObserver) {
+
+ onCancel =
+ () -> {
+ // The consumer has cancelled the stream.
+ // Do not allow additional responses to be sent.
+ isResponsePermitted.set(false);
+ subscriptionHandler.unsubscribe(this);
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Consumer cancelled stream. Observer unsubscribed.");
+ };
+ serverCallStreamObserver.setOnCancelHandler(onCancel);
+
+ onClose =
+ () -> {
+ // The consumer has closed the stream.
+ // Do not allow additional responses to be sent.
+ isResponsePermitted.set(false);
+ subscriptionHandler.unsubscribe(this);
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Consumer completed stream. Observer unsubscribed.");
+ };
+ serverCallStreamObserver.setOnCloseHandler(onClose);
+ }
+
+ this.subscribeStreamResponseObserver = subscribeStreamResponseObserver;
+ this.producerLivenessClock = producerLivenessClock;
+ this.producerLivenessMillis = producerLivenessClock.millis();
+ }
+
+ /**
+ * The onEvent method is invoked by the Disruptor when a new SubscribeStreamResponse is
+ * available. Before sending the response to the downstream consumer, the method checks the
+ * producer liveness and unsubscribes the observer if the producer activity is outside the
+ * configured timeout threshold. The method also ensures that the downstream subscriber has not
+ * cancelled or closed the stream before sending the response.
+ *
+ * @param event the ObjectEvent containing the SubscribeStreamResponse
+ * @param l the sequence number of the event
+ * @param b true if the event is the last in the sequence
+ */
+ @Override
+ public void onEvent(
+            @NonNull final ObjectEvent<SubscribeStreamResponse> event,
+ final long l,
+ final boolean b) {
+
+ // Only send the response if the consumer has not cancelled
+ // or closed the stream.
+ if (isResponsePermitted.get()) {
+ final long currentMillis = producerLivenessClock.millis();
+ if (currentMillis - producerLivenessMillis > timeoutThresholdMillis) {
+ subscriptionHandler.unsubscribe(this);
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+                        "Unsubscribed ConsumerStreamResponseObserver due to producer timeout");
+ } else {
+ // Refresh the producer liveness and pass the BlockItem to the downstream observer.
+ producerLivenessMillis = currentMillis;
+
+ // Only start sending BlockItems after we've reached
+ // the beginning of a block.
+ @NonNull final SubscribeStreamResponse subscribeStreamResponse = event.get();
+ @NonNull final BlockItem blockItem = subscribeStreamResponse.getBlockItem();
+ if (!streamStarted && blockItem.hasHeader()) {
+ streamStarted = true;
+ }
+
+ if (streamStarted) {
+ LOGGER.log(
+ System.Logger.Level.DEBUG,
+ "Send BlockItem downstream: {0} ",
+ blockItem);
+ subscribeStreamResponseObserver.onNext(subscribeStreamResponse);
+ }
+ }
+ }
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/consumer/LiveStreamObserver.java b/server/src/main/java/com/hedera/block/server/consumer/LiveStreamObserver.java
deleted file mode 100644
index 135ecb674..000000000
--- a/server/src/main/java/com/hedera/block/server/consumer/LiveStreamObserver.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.consumer;
-
-import io.grpc.stub.StreamObserver;
-
-/**
- * The LiveStreamObserver interface augments the StreamObserver interface with the notify() method thereby
- * allowing a caller to pass a block to the observer of a different type than the StreamObserver. In this way,
- * the implementation of this interface can receive and process inbound messages with different types from
- * the producer and response messages from the consumer.
- *
- * @param <U> the type of the block
- * @param <V> the type of the StreamObserver
- */
-public interface LiveStreamObserver<U, V> extends StreamObserver<V> {
-
- /**
- * Pass the block to the observer.
- *
- * @param block - the block to be passed to the observer
- */
- void notify(final U block);
-}
diff --git a/server/src/main/java/com/hedera/block/server/consumer/LiveStreamObserverImpl.java b/server/src/main/java/com/hedera/block/server/consumer/LiveStreamObserverImpl.java
deleted file mode 100644
index 7a0d7db7b..000000000
--- a/server/src/main/java/com/hedera/block/server/consumer/LiveStreamObserverImpl.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.consumer;
-
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.mediator.StreamMediator;
-import io.grpc.stub.StreamObserver;
-
-import java.time.Clock;
-import java.time.InstantSource;
-
-/**
- * The LiveStreamObserverImpl class implements the LiveStreamObserver interface to pass blocks to the downstream consumer
- * via the notify method and manage the bidirectional stream to the consumer via the onNext, onError, and onCompleted methods.
- */
-public class LiveStreamObserverImpl implements LiveStreamObserver<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> {
-
- private final System.Logger LOGGER = System.getLogger(getClass().getName());
-
-    private final StreamMediator<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> mediator;
-    private final StreamObserver<BlockStreamServiceGrpcProto.Block> responseStreamObserver;
-
- private final long timeoutThresholdMillis;
-
- private final InstantSource producerLivenessClock;
- private long producerLivenessMillis;
-
- private final InstantSource consumerLivenessClock;
- private long consumerLivenessMillis;
-
- /**
- * Constructor for the LiveStreamObserverImpl class.
- *
- * @param mediator the mediator
- * @param responseStreamObserver the response stream observer
- */
- public LiveStreamObserverImpl(
- final long timeoutThresholdMillis,
- final InstantSource producerLivenessClock,
- final InstantSource consumerLivenessClock,
-            final StreamMediator<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> mediator,
-            final StreamObserver<BlockStreamServiceGrpcProto.Block> responseStreamObserver) {
-
- this.timeoutThresholdMillis = timeoutThresholdMillis;
- this.producerLivenessClock = producerLivenessClock;
- this.consumerLivenessClock = consumerLivenessClock;
- this.mediator = mediator;
- this.responseStreamObserver = responseStreamObserver;
-
- this.producerLivenessMillis = producerLivenessClock.millis();
- this.consumerLivenessMillis = consumerLivenessClock.millis();
- }
-
- /**
- * Pass the block to the observer provided by Helidon
- *
- * @param block the block to be passed to the observer
- */
- @Override
- public void notify(final BlockStreamServiceGrpcProto.Block block) {
-
- // Check if the consumer has timed out. If so, unsubscribe the observer from the mediator.
- if (consumerLivenessClock.millis() - consumerLivenessMillis > timeoutThresholdMillis) {
- if (mediator.isSubscribed(this)) {
- LOGGER.log(System.Logger.Level.DEBUG, "Consumer timeout threshold exceeded. Unsubscribing observer.");
- mediator.unsubscribe(this);
- }
- } else {
- // Refresh the producer liveness and pass the block to the observer.
- producerLivenessMillis = producerLivenessClock.millis();
- responseStreamObserver.onNext(block);
- }
- }
-
- /**
- * The onNext() method is triggered by Helidon when a consumer sends a blockResponse via the bidirectional stream.
- *
- * @param blockResponse the BlockResponse passed back to the server via the bidirectional stream to the downstream consumer.
- */
- @Override
- public void onNext(final BlockStreamServiceGrpcProto.BlockResponse blockResponse) {
-
- if (producerLivenessClock.millis() - producerLivenessMillis > timeoutThresholdMillis) {
- LOGGER.log(System.Logger.Level.DEBUG, "Producer timeout threshold exceeded. Unsubscribing observer.");
- mediator.unsubscribe(this);
- } else {
- LOGGER.log(System.Logger.Level.DEBUG, "Received response block " + blockResponse);
- consumerLivenessMillis = consumerLivenessClock.millis();
- }
- }
-
- /**
- * The onError() method is triggered by Helidon when an error occurs on the bidirectional stream to the downstream consumer.
- * Unsubscribe the observer from the mediator.
- *
- * @param t the error occurred on the stream
- */
- @Override
- public void onError(final Throwable t) {
- LOGGER.log(System.Logger.Level.ERROR, "Unexpected consumer stream communication failure: %s".formatted(t), t);
- mediator.unsubscribe(this);
- }
-
- /**
- * The onCompleted() method is triggered by Helidon when the bidirectional stream to the downstream consumer is completed.
- * This implementation will then unsubscribe the observer from the mediator.
- */
- @Override
- public void onCompleted() {
- LOGGER.log(System.Logger.Level.DEBUG, "gRPC connection completed. Unsubscribing observer.");
- mediator.unsubscribe(this);
- LOGGER.log(System.Logger.Level.DEBUG, "Unsubscribed observer.");
- }
-}
diff --git a/server/src/main/java/com/hedera/block/server/data/ObjectEvent.java b/server/src/main/java/com/hedera/block/server/data/ObjectEvent.java
new file mode 100644
index 000000000..d9256d920
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/data/ObjectEvent.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.data;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * The ObjectEvent class defines a simple object event used to publish data to downstream
+ * subscribers through the LMAX Disruptor RingBuffer.
+ *
+ * @param <T> the type of the data to publish
+ */
+public class ObjectEvent<T> {
+
+ /** Constructor for the ObjectEvent class. */
+ public ObjectEvent() {}
+
+ private T val;
+
+ /**
+ * Sets the given value to be published to downstream subscribers through the LMAX Disruptor.
+ * The value must not be null and the method is thread-safe.
+ *
+ * @param val the value to set
+ */
+ public void set(@NonNull final T val) {
+ this.val = val;
+ }
+
+ /**
+ * Gets the value of the event from the LMAX Disruptor on the consumer side. The method is
+ * thread-safe.
+ *
+ * @return the value of the event
+ */
+ @NonNull
+ public T get() {
+ return val;
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorBuilder.java b/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorBuilder.java
new file mode 100644
index 000000000..661c8f51a
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorBuilder.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.mediator;
+
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+import static com.hedera.block.protos.BlockStreamService.SubscribeStreamResponse;
+
+import com.hedera.block.server.ServiceStatus;
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.persistence.storage.write.BlockWriter;
+import com.lmax.disruptor.BatchEventProcessor;
+import com.lmax.disruptor.EventHandler;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Use builder methods to create a {@link StreamMediator} to handle live stream events from a
+ * producer to N consumers.
+ *
+ * When a stream mediator is created, it will accept new block items from a producer, publish
+ * them to all consumers subscribed to the stream, and persist the block items to storage
+ * represented by a {@link BlockWriter}.
+ */
+public class LiveStreamMediatorBuilder {
+
+    private final BlockWriter<BlockItem> blockWriter;
+ private final BlockNodeContext blockNodeContext;
+ private final ServiceStatus serviceStatus;
+
+ private Map<
+                    EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                    BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+ subscribers;
+
+ /** The initial capacity of the subscriber map. */
+ private static final int SUBSCRIBER_INIT_CAPACITY = 32;
+
+ private LiveStreamMediatorBuilder(
+            @NonNull final BlockWriter<BlockItem> blockWriter,
+ @NonNull final BlockNodeContext blockNodeContext,
+ @NonNull final ServiceStatus serviceStatus) {
+ this.subscribers = new ConcurrentHashMap<>(SUBSCRIBER_INIT_CAPACITY);
+ this.blockWriter = blockWriter;
+ this.blockNodeContext = blockNodeContext;
+ this.serviceStatus = serviceStatus;
+ }
+
+ /**
+ * Create a new instance of the builder using the minimum required parameters.
+ *
+ * @param blockWriter is required for the stream mediator to persist block items to storage.
+ * @param blockNodeContext is required to provide metrics reporting mechanisms to the stream
+ * mediator.
+ * @param serviceStatus is required to provide the stream mediator with access to check the
+ * status of the server and to stop the web server if necessary.
+ * @return a new stream mediator builder configured with required parameters.
+ */
+ @NonNull
+ public static LiveStreamMediatorBuilder newBuilder(
+            @NonNull final BlockWriter<BlockItem> blockWriter,
+ @NonNull final BlockNodeContext blockNodeContext,
+ @NonNull final ServiceStatus serviceStatus) {
+ return new LiveStreamMediatorBuilder(blockWriter, blockNodeContext, serviceStatus);
+ }
+
+ /**
+     * Optionally, provide a map implementation of subscribers for the stream mediator. This
+     * method should only be used for testing. Provided map implementations should be thread-safe
+ * to handle subscribers being added and removed dynamically from the stream mediator at
+ * runtime.
+ *
+ * @param subscribers is the map of subscribers to set
+ * @return the builder
+ */
+ @NonNull
+ public LiveStreamMediatorBuilder subscribers(
+ @NonNull
+ final Map<
+                            EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                            BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+ subscribers) {
+ this.subscribers = subscribers;
+ return this;
+ }
+
+ /**
+ * Use the build method to construct a stream mediator to handle live stream events from a
+ * producer to N consumers.
+ *
+ * @return the stream mediator to handle live stream events between a producer and N consumers.
+ */
+ @NonNull
+    public StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> build() {
+ return new LiveStreamMediatorImpl(
+ subscribers, blockWriter, serviceStatus, blockNodeContext);
+ }
+}
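A usage sketch for the builder; the blockWriter, blockNodeContext, and serviceStatus variables are assumed to be supplied by the server's existing wiring:

    // Hypothetical call site; the three dependencies already exist elsewhere.
    final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator =
            LiveStreamMediatorBuilder.newBuilder(blockWriter, blockNodeContext, serviceStatus)
                    .build();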
diff --git a/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorImpl.java b/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorImpl.java
index c5ca1fe13..09645e440 100644
--- a/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorImpl.java
+++ b/server/src/main/java/com/hedera/block/server/mediator/LiveStreamMediatorImpl.java
@@ -16,84 +16,199 @@
package com.hedera.block.server.mediator;
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.consumer.LiveStreamObserver;
-import com.hedera.block.server.persistence.BlockPersistenceHandler;
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+import static com.hedera.block.protos.BlockStreamService.SubscribeStreamResponse;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.Set;
+import com.hedera.block.server.ServiceStatus;
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.metrics.MetricsService;
+import com.hedera.block.server.persistence.storage.write.BlockWriter;
+import com.lmax.disruptor.BatchEventProcessor;
+import com.lmax.disruptor.BatchEventProcessorBuilder;
+import com.lmax.disruptor.EventHandler;
+import com.lmax.disruptor.RingBuffer;
+import com.lmax.disruptor.dsl.Disruptor;
+import com.lmax.disruptor.util.DaemonThreadFactory;
+import com.swirlds.metrics.api.LongGauge;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
/**
- * LiveStreamMediatorImpl is the implementation of the StreamMediator interface. It is responsible for
- * managing the subscribe and unsubscribe operations of downstream consumers. It also proxies live
- * blocks to the subscribers as they arrive and persists the blocks to the block persistence store.
+ * LiveStreamMediatorImpl is an implementation of the StreamMediator interface. It is responsible
+ * for managing the subscribe and unsubscribe operations of downstream consumers. It also proxies
+ * block items to the subscribers as they arrive via a RingBuffer and persists the block items to a
+ * store.
*/
-public class LiveStreamMediatorImpl implements StreamMediator<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> {
+class LiveStreamMediatorImpl
+        implements StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> {
private final System.Logger LOGGER = System.getLogger(getClass().getName());
-    private final Set<LiveStreamObserver<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse>> subscribers = Collections.synchronizedSet(new LinkedHashSet<>());
-    private final BlockPersistenceHandler<BlockStreamServiceGrpcProto.Block> blockPersistenceHandler;
+    private final RingBuffer<ObjectEvent<SubscribeStreamResponse>> ringBuffer;
+ private final ExecutorService executor;
- /**
- * Constructor for the LiveStreamMediatorImpl class.
- *
- * @param blockPersistenceHandler the block persistence handler
- */
-    public LiveStreamMediatorImpl(final BlockPersistenceHandler<BlockStreamServiceGrpcProto.Block> blockPersistenceHandler) {
- this.blockPersistenceHandler = blockPersistenceHandler;
- }
+ private final Map<
+                    EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                    BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+ subscribers;
+
+    private final BlockWriter<BlockItem> blockWriter;
+ private final ServiceStatus serviceStatus;
+ private final BlockNodeContext blockNodeContext;
/**
- * Subscribe a new observer to the mediator
+ * Constructs a new LiveStreamMediatorImpl instance with the given subscribers, block writer,
+ * and service status. This constructor is primarily used for testing purposes. Users of this
+ * constructor should take care to supply a thread-safe map implementation for the subscribers
+ * to handle the dynamic addition and removal of subscribers at runtime.
*
- * @param liveStreamObserver the observer to be subscribed
+ * @param subscribers the map of subscribers to batch event processors. It's recommended the map
+ * implementation is thread-safe
+ * @param blockWriter the block writer to persist block items
+ * @param serviceStatus the service status to stop the service and web server if an exception
+ * occurs while persisting a block item, stop the web server for maintenance, etc
*/
- @Override
-    public void subscribe(final LiveStreamObserver<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> liveStreamObserver) {
- subscribers.add(liveStreamObserver);
+ LiveStreamMediatorImpl(
+ @NonNull
+ final Map<
+                            EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                            BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+ subscribers,
+            @NonNull final BlockWriter<BlockItem> blockWriter,
+ @NonNull final ServiceStatus serviceStatus,
+ @NonNull final BlockNodeContext blockNodeContext) {
+
+ this.subscribers = subscribers;
+ this.blockWriter = blockWriter;
+
+ // Initialize and start the disruptor
+ @NonNull
+    final Disruptor<ObjectEvent<SubscribeStreamResponse>> disruptor =
+ new Disruptor<>(ObjectEvent::new, 1024, DaemonThreadFactory.INSTANCE);
+ this.ringBuffer = disruptor.start();
+ this.executor = Executors.newCachedThreadPool(DaemonThreadFactory.INSTANCE);
+ this.serviceStatus = serviceStatus;
+ this.blockNodeContext = blockNodeContext;
}
/**
- * Unsubscribe an observer from the mediator
+ * Publishes the given block item to all subscribers. If an exception occurs while persisting
+ * the block item, the service status is set to not running, and all downstream consumers are
+ * unsubscribed.
*
- * @param liveStreamObserver the observer to be unsubscribed
+ * @param blockItem the block item from the upstream producer to publish to downstream consumers
+ * @throws IOException is thrown if an exception occurs while persisting the block item
*/
@Override
-    public void unsubscribe(final LiveStreamObserver<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> liveStreamObserver) {
- if (subscribers.remove(liveStreamObserver)) {
- LOGGER.log(System.Logger.Level.DEBUG, "Successfully removed observer from subscription list");
+ public void publish(@NonNull final BlockItem blockItem) throws IOException {
+
+ if (serviceStatus.isRunning()) {
+
+ // Publish the block for all subscribers to receive
+ LOGGER.log(System.Logger.Level.DEBUG, "Publishing BlockItem: {0}", blockItem);
+ @NonNull
+ final var subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
+ ringBuffer.publishEvent((event, sequence) -> event.set(subscribeStreamResponse));
+
+ // Increment the block item counter
+ @NonNull final MetricsService metricsService = blockNodeContext.metricsService();
+ metricsService.liveBlockItems.increment();
+
+ try {
+ // Persist the BlockItem
+ blockWriter.write(blockItem);
+ } catch (IOException e) {
+ // Disable BlockItem publication for upstream producers
+ serviceStatus.setRunning(false);
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "An exception occurred while attempting to persist the BlockItem: "
+ + blockItem,
+ e);
+
+ LOGGER.log(System.Logger.Level.DEBUG, "Send a response to end the stream");
+
+ // Publish the block for all subscribers to receive
+ @NonNull final SubscribeStreamResponse endStreamResponse = buildEndStreamResponse();
+ ringBuffer.publishEvent((event, sequence) -> event.set(endStreamResponse));
+
+ // Unsubscribe all downstream consumers
+ for (@NonNull final var subscriber : subscribers.keySet()) {
+ LOGGER.log(System.Logger.Level.DEBUG, "Unsubscribing: {0}", subscriber);
+ unsubscribe(subscriber);
+ }
+
+ throw e;
+ }
+ } else {
+ LOGGER.log(System.Logger.Level.ERROR, "StreamMediator is not accepting BlockItems");
}
}
- /**
- * Check if an observer is subscribed to the mediator
- *
- * @param observer the observer to be checked
- * @return true if the observer is subscribed, false otherwise
- */
@Override
-    public boolean isSubscribed(final LiveStreamObserver<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> observer) {
- return subscribers.contains(observer);
+ public void subscribe(
+            @NonNull final EventHandler<ObjectEvent<SubscribeStreamResponse>> handler) {
+
+ // Initialize the batch event processor and set it on the ring buffer
+ @NonNull
+ final var batchEventProcessor =
+ new BatchEventProcessorBuilder()
+ .build(ringBuffer, ringBuffer.newBarrier(), handler);
+
+ ringBuffer.addGatingSequences(batchEventProcessor.getSequence());
+ executor.execute(batchEventProcessor);
+
+ // Keep track of the subscriber
+ subscribers.put(handler, batchEventProcessor);
+
+ updateSubscriberMetrics();
}
- /**
- * Notify all observers of a new block
- *
- * @param block the block to be notified to all observers
- */
@Override
- public void notifyAll(final BlockStreamServiceGrpcProto.Block block) {
+ public void unsubscribe(
+            @NonNull final EventHandler<ObjectEvent<SubscribeStreamResponse>> handler) {
+
+ // Remove the subscriber
+ @NonNull final var batchEventProcessor = subscribers.remove(handler);
+ if (batchEventProcessor == null) {
+ LOGGER.log(System.Logger.Level.ERROR, "Subscriber not found: {0}", handler);
- LOGGER.log(System.Logger.Level.DEBUG, "Notifying " + subscribers.size() + " observers of a new block");
+ } else {
- // Proxy the block to all live stream subscribers
- for (final var subscriber : subscribers) {
- subscriber.notify(block);
+ // Stop the processor
+ batchEventProcessor.halt();
+
+ // Remove the gating sequence from the ring buffer
+ ringBuffer.removeGatingSequence(batchEventProcessor.getSequence());
}
- // Persist the block
- blockPersistenceHandler.persist(block);
+ updateSubscriberMetrics();
+ }
+
+ @Override
+ public boolean isSubscribed(
+            @NonNull EventHandler<ObjectEvent<SubscribeStreamResponse>> handler) {
+ return subscribers.containsKey(handler);
+ }
+
+ @NonNull
+ private static SubscribeStreamResponse buildEndStreamResponse() {
+ // The current spec does not contain a generic error code for
+ // SubscribeStreamResponseCode.
+ // TODO: Replace READ_STREAM_SUCCESS (2) with a generic error code?
+ return SubscribeStreamResponse.newBuilder()
+ .setStatus(SubscribeStreamResponse.SubscribeStreamResponseCode.READ_STREAM_SUCCESS)
+ .build();
+ }
+
+ private void updateSubscriberMetrics() {
+ @NonNull final MetricsService metricsService = blockNodeContext.metricsService();
+ @NonNull final LongGauge longGauge = metricsService.subscribers;
+ longGauge.set(subscribers.size());
}
}
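Putting the pieces together, the intended flow is: a consumer registers an EventHandler, the producer publishes BlockItems, and every registered handler receives SubscribeStreamResponse events off the ring buffer. A rough sketch, assuming a mediator built as shown above and a blockItem supplied by a producer:

    // Illustrative handler; a real consumer would forward the response to its gRPC observer.
    final EventHandler<ObjectEvent<SubscribeStreamResponse>> handler =
            (event, sequence, endOfBatch) -> System.out.println("Received: " + event.get());

    streamMediator.subscribe(handler);
    streamMediator.publish(blockItem); // throws IOException if persistence fails
    streamMediator.unsubscribe(handler);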
diff --git a/server/src/main/java/com/hedera/block/server/mediator/Publisher.java b/server/src/main/java/com/hedera/block/server/mediator/Publisher.java
new file mode 100644
index 000000000..a34455bcc
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/mediator/Publisher.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.mediator;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.io.IOException;
+
+/**
+ * The Publisher interface defines the contract for publishing data emitted by the producer to
+ * downstream subscribers.
+ *
+ * @param <U> the type of data to publish
+ */
+public interface Publisher<U> {
+
+ /**
+ * Publishes the given data to the downstream subscribers.
+ *
+ * @param data the data emitted by an upstream producer to publish to downstream subscribers.
+ * @throws IOException thrown if an I/O error occurs while publishing the item to the
+ * subscribers.
+ */
+ void publish(@NonNull final U data) throws IOException;
+}
diff --git a/server/src/main/java/com/hedera/block/server/mediator/StreamMediator.java b/server/src/main/java/com/hedera/block/server/mediator/StreamMediator.java
index 07f448df7..87e2d4030 100644
--- a/server/src/main/java/com/hedera/block/server/mediator/StreamMediator.java
+++ b/server/src/main/java/com/hedera/block/server/mediator/StreamMediator.java
@@ -16,47 +16,13 @@
package com.hedera.block.server.mediator;
-import com.hedera.block.server.consumer.LiveStreamObserver;
-
/**
- * The StreamMediator interface represents a one-to-many bridge between a bidirectional stream of blocks from a
- * producer (e.g. a Consensus Node) and N consumers each requesting a bidirectional connection to get
- * a "live stream" of blocks from the producer. StreamMediator satisfies Helidon's type requirements for a
- * bidirectional StreamObserver representing a stream of blocks returned FROM the downstream consuming client.
- * However, the StreamObserver type may be distinct from Block type streamed TO the client. The type definition
- * for the onNext() method provides the flexibility for the StreamObserver and the Block types to vary independently.
+ * The StreamMediator marker interface defines the combination of Publisher and SubscriptionHandler
+ * contracts. It defines multiple views of the underlying implementation, allowing producers to
+ * publish data while the service and downstream subscribers can manage which consumers are
+ * subscribed to the stream of events.
*
- * @param <U> The type of the block
- * @param <V> The type of the StreamObserver
+ * @param <U> the type of the data to publish
+ * @param <V> the type of the events the SubscriptionHandler processes
*/
-public interface StreamMediator<U, V> {
-
- /**
- * Subscribes a new LiveStreamObserver to receive blocks from the producer as they arrive
- *
- * @param observer the LiveStreamObserver to subscribe
- */
-    void subscribe(final LiveStreamObserver<U, V> observer);
-
- /**
- * Unsubscribes a LiveStreamObserver from the producer
- *
- * @param observer the LiveStreamObserver to unsubscribe
- */
-    void unsubscribe(final LiveStreamObserver<U, V> observer);
-
- /**
- * Checks if the observer is subscribed to the producer
- *
- * @param observer the LiveStreamObserver to check
- * @return true if the observer is subscribed, false otherwise
- */
-    boolean isSubscribed(final LiveStreamObserver<U, V> observer);
-
- /**
- * Passes the newly arrived block to all subscribers
- *
- * @param block the block to pass to the subscribers
- */
- void notifyAll(final U block);
-}
+public interface StreamMediator<U, V> extends Publisher<U>, SubscriptionHandler<V> {}
diff --git a/server/src/main/java/com/hedera/block/server/mediator/SubscriptionHandler.java b/server/src/main/java/com/hedera/block/server/mediator/SubscriptionHandler.java
new file mode 100644
index 000000000..b3ec66390
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/mediator/SubscriptionHandler.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.mediator;
+
+import com.lmax.disruptor.EventHandler;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * The SubscriptionHandler interface defines the contract for subscribing and unsubscribing
+ * downstream consumers to the stream of events.
+ *
+ * @param <V> the type of the subscription events
+ */
+public interface SubscriptionHandler<V> {
+
+ /**
+ * Subscribes the given handler to the stream of events.
+ *
+ * @param handler the handler to subscribe
+ */
+    void subscribe(@NonNull final EventHandler<V> handler);
+
+ /**
+ * Unsubscribes the given handler from the stream of events.
+ *
+ * @param handler the handler to unsubscribe
+ */
+    void unsubscribe(@NonNull final EventHandler<V> handler);
+
+ /**
+ * Checks if the given handler is subscribed to the stream of events.
+ *
+ * @param handler the handler to check
+ * @return true if the handler is subscribed, false otherwise
+ */
+    boolean isSubscribed(@NonNull final EventHandler<V> handler);
+}
diff --git a/server/src/main/java/com/hedera/block/server/metrics/MetricsService.java b/server/src/main/java/com/hedera/block/server/metrics/MetricsService.java
index 9c01d0103..4dd4a59af 100644
--- a/server/src/main/java/com/hedera/block/server/metrics/MetricsService.java
+++ b/server/src/main/java/com/hedera/block/server/metrics/MetricsService.java
@@ -21,29 +21,69 @@
import com.swirlds.metrics.api.Metrics;
import edu.umd.cs.findbugs.annotations.NonNull;
+/**
+ * Use member variables of this class to update metric data for the Hedera Block Node.
+ *
+ * Metrics are updated by calling the appropriate method on the metric object instance. For
+ * example, to increment a counter, call {@link Counter#increment()}.
+ */
public class MetricsService {
- private static final String CATEGORY = "app";
+ private static final String CATEGORY = "hedera_block_node";
private static final LongGauge.Config EXAMPLE_GAUGE =
new LongGauge.Config(CATEGORY, "exampleGauge").withDescription("An example gauge");
- /** An example gauge. */
- public final LongGauge exampleGauge;
-
private static final Counter.Config EXAMPLE_COUNTER =
new Counter.Config(CATEGORY, "exampleCounter").withDescription("An example counter");
+ // Live BlockItem Counter
+ private static final Counter.Config LIVE_BLOCK_ITEM_COUNTER =
+ new Counter.Config(CATEGORY, "live_block_items").withDescription("Live BlockItems");
+
+ // Block Persistence Counter
+ private static final Counter.Config BLOCK_PERSISTENCE_COUNTER =
+ new Counter.Config(CATEGORY, "blocks_persisted").withDescription("Blocks Persisted");
+
+ // Subscriber Gauge
+ private static final LongGauge.Config SUBSCRIBER_GAUGE =
+ new LongGauge.Config(CATEGORY, "subscribers").withDescription("Subscribers");
+
+ // Single Block Retrieved Counter
+ private static final Counter.Config SINGLE_BLOCK_RETRIEVED_COUNTER =
+ new Counter.Config(CATEGORY, "single_blocks_retrieved")
+ .withDescription("Single Blocks Retrieved");
+
+ /** An example gauge. */
+ public final LongGauge exampleGauge;
+
/** An example counter. */
public final Counter exampleCounter;
+ /** Update the counter of live block items transiting via the live stream. */
+ public final Counter liveBlockItems;
+
+ /** Update the counter of blocks persisted to storage. */
+ public final Counter blocksPersisted;
+
+ /** Update the counter of single blocks retrieved from storage. */
+ public final Counter singleBlocksRetrieved;
+
+ /** Update the gauge of subscribers currently consuming to the live stream. */
+ public final LongGauge subscribers;
+
/**
- * Creates a new instance of {@link MetricsService}.
+     * Create a singleton instance of the metrics service to be used throughout the application.
*
* @param metrics the metrics instance
*/
public MetricsService(@NonNull final Metrics metrics) {
this.exampleGauge = metrics.getOrCreate(EXAMPLE_GAUGE);
this.exampleCounter = metrics.getOrCreate(EXAMPLE_COUNTER);
+
+ this.liveBlockItems = metrics.getOrCreate(LIVE_BLOCK_ITEM_COUNTER);
+ this.blocksPersisted = metrics.getOrCreate(BLOCK_PERSISTENCE_COUNTER);
+ this.singleBlocksRetrieved = metrics.getOrCreate(SINGLE_BLOCK_RETRIEVED_COUNTER);
+ this.subscribers = metrics.getOrCreate(SUBSCRIBER_GAUGE);
}
}
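A short sketch of how these members are expected to be used; the blockNodeContext reference is assumed to be available from the caller's context:

    final MetricsService metricsService = blockNodeContext.metricsService();
    metricsService.liveBlockItems.increment();  // one more BlockItem seen on the live stream
    metricsService.blocksPersisted.increment(); // one more block written to storage
    metricsService.subscribers.set(3);          // current number of live-stream subscribers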
diff --git a/server/src/main/java/com/hedera/block/server/persistence/BlockPersistenceHandler.java b/server/src/main/java/com/hedera/block/server/persistence/BlockPersistenceHandler.java
deleted file mode 100644
index fd228b145..000000000
--- a/server/src/main/java/com/hedera/block/server/persistence/BlockPersistenceHandler.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.persistence;
-
-import java.util.Optional;
-import java.util.Queue;
-
-/**
- * The BlockPersistenceHandler interface defines operations to persist and read blocks.
- * The interface is used to abstract underlying storage mechanisms.
- *
- * @param <V> the type of block to persist
- */
-public interface BlockPersistenceHandler<V> {
-
- /**
- * Persists a block.
- *
- * @param block the block to persist
- * @return the id of the block
- */
- Long persist(final V block);
-
- /**
- * Reads a block.
- *
- * @param id the id of the block to read
- * @return an Optional of the block
- */
-    Optional<V> read(final long id);
-
- /**
- * Reads a range of blocks.
- *
- * @param startBlockId the id of the first block to read
- * @param endBlockId the id of the last block to read
- * @return a queue of blocks
- */
-    Queue<V> readRange(final long startBlockId, final long endBlockId);
-}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/WriteThroughCacheHandler.java b/server/src/main/java/com/hedera/block/server/persistence/WriteThroughCacheHandler.java
deleted file mode 100644
index 277e5cb2d..000000000
--- a/server/src/main/java/com/hedera/block/server/persistence/WriteThroughCacheHandler.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.persistence;
-
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.persistence.storage.BlockStorage;
-
-import java.util.ArrayDeque;
-import java.util.LinkedList;
-import java.util.Optional;
-import java.util.Queue;
-
-/**
- * Write-Through cache handler coordinates between the block storage and the block cache to ensure the block
- * is persisted to the storage before being cached.
- */
-public class WriteThroughCacheHandler implements BlockPersistenceHandler<BlockStreamServiceGrpcProto.Block> {
-
-    private final BlockStorage<BlockStreamServiceGrpcProto.Block> blockStorage;
-
- /**
- * Constructor for the WriteThroughCacheHandler class.
- *
- * @param blockStorage the block storage
- */
-    public WriteThroughCacheHandler(final BlockStorage<BlockStreamServiceGrpcProto.Block> blockStorage) {
- this.blockStorage = blockStorage;
- }
-
- /**
- * Persists the block to the block storage and cache the block.
- *
- * @param block the block to persist
- * @return the block id
- */
- @Override
- public Long persist(final BlockStreamServiceGrpcProto.Block block) {
-
- // Write-Through cache
- blockStorage.write(block);
- return block.getId();
- }
-
- /**
- * Reads a range of blocks from the block storage and cache.
- *
- * @param startBlockId the start block id
- * @param endBlockId the end block id
- * @return a queue of blocks
- */
- @Override
-    public Queue<BlockStreamServiceGrpcProto.Block> readRange(final long startBlockId, final long endBlockId) {
-        final Queue<BlockStreamServiceGrpcProto.Block> blocks = new LinkedList<>();
-
- long count = startBlockId;
-        Optional<BlockStreamServiceGrpcProto.Block> blockOpt = read(count);
- while (count <= endBlockId && blockOpt.isPresent()) {
- final BlockStreamServiceGrpcProto.Block block = blockOpt.get();
- blocks.add(block);
- blockOpt = read(++count);
- }
-
- return blocks;
- }
-
- /**
- * The read method first checks the cache for the block.
- * If the block is not in cache, then it reads from storage and
- * updates the cache.
- *
- * @param id the block id
- * @return an Optional with the block
- */
- @Override
-    public Optional<BlockStreamServiceGrpcProto.Block> read(final long id) {
- return blockStorage.read(id);
- }
-}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/FileSystemBlockStorage.java b/server/src/main/java/com/hedera/block/server/persistence/storage/FileSystemBlockStorage.java
deleted file mode 100644
index 821ea4e92..000000000
--- a/server/src/main/java/com/hedera/block/server/persistence/storage/FileSystemBlockStorage.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.persistence.storage;
-
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import io.helidon.config.Config;
-
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Optional;
-
-import static com.hedera.block.server.Constants.BLOCKNODE_STORAGE_ROOT_PATH_KEY;
-
-/**
- * The FileSystemBlockStorage class implements the BlockStorage interface to store blocks to the filesystem.
- */
-public class FileSystemBlockStorage implements BlockStorage<BlockStreamServiceGrpcProto.Block> {
-
- public static final String BLOCK_FILE_EXTENSION = ".blk";
-
- private final Path blockNodeRootPath;
- private final System.Logger LOGGER = System.getLogger(getClass().getName());
-
- /**
- * Constructs a FileSystemBlockStorage object.
- *
- * @param key the key to use to retrieve the block node root path from the configuration
- * @param config the configuration
- * @throws IOException if an I/O error occurs while initializing the block node root directory
- */
- public FileSystemBlockStorage(final String key, final Config config) throws IOException {
-
- LOGGER.log(System.Logger.Level.INFO, "Initializing FileSystemBlockStorage");
- LOGGER.log(System.Logger.Level.INFO, config.toString());
-
- blockNodeRootPath = Path.of(config
- .get(key)
- .asString()
- .get());
-
- LOGGER.log(System.Logger.Level.INFO, "Block Node Root Path: " + blockNodeRootPath);
-
- if (!blockNodeRootPath.isAbsolute()) {
- throw new IllegalArgumentException(BLOCKNODE_STORAGE_ROOT_PATH_KEY+ " must be an absolute path");
- }
-
- // Initialize the block node root directory if it does not exist
- if (Files.notExists(blockNodeRootPath)) {
- Files.createDirectory(blockNodeRootPath);
- LOGGER.log(System.Logger.Level.INFO, "Created block node root directory: " + blockNodeRootPath);
- } else {
- LOGGER.log(System.Logger.Level.INFO, "Using existing block node root directory: " + blockNodeRootPath);
- }
- }
-
- /**
- * Writes a block to the filesystem.
- *
- * @param block the block to write
- * @return the id of the block
- */
- @Override
-    public Optional<Long> write(final BlockStreamServiceGrpcProto.Block block) {
- Long id = block.getId();
- final String fullPath = resolvePath(id);
-
- try (FileOutputStream fos = new FileOutputStream(fullPath)) {
- block.writeTo(fos);
- LOGGER.log(System.Logger.Level.DEBUG, "Successfully wrote the block file: " + fullPath);
-
- return Optional.of(id);
- } catch (IOException e) {
- LOGGER.log(System.Logger.Level.ERROR, "Error writing the protobuf to a file", e);
- return Optional.empty();
- }
- }
-
- /**
- * Reads a block from the filesystem.
- *
- * @param id the id of the block to read
- * @return the block
- */
- @Override
-    public Optional<BlockStreamServiceGrpcProto.Block> read(final Long id) {
- return read(resolvePath(id));
- }
-
-    private Optional<BlockStreamServiceGrpcProto.Block> read(final String filePath) {
-
- try (FileInputStream fis = new FileInputStream(filePath)) {
- return Optional.of(BlockStreamServiceGrpcProto.Block.parseFrom(fis));
- } catch (FileNotFoundException io) {
- LOGGER.log(System.Logger.Level.ERROR, "Error reading file: " + filePath, io);
- return Optional.empty();
- } catch (IOException io) {
- throw new RuntimeException("Error reading file: " + filePath, io);
- }
- }
-
- private String resolvePath(final Long id) {
-
- String fileName = id + BLOCK_FILE_EXTENSION;
- Path fullPath = blockNodeRootPath.resolve(fileName);
- LOGGER.log(System.Logger.Level.DEBUG, "Resolved fullPath: " + fullPath);
-
- return fullPath.toString();
- }
-}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/Util.java b/server/src/main/java/com/hedera/block/server/persistence/storage/Util.java
new file mode 100644
index 000000000..5dec622c0
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/Util.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.util.Set;
+
+/** Util methods provide common functionality for the storage package. */
+public final class Util {
+ private Util() {}
+
+ /**
+     * Default file permissions used for the files and directories of the storage package.
+ *
+ * Default permissions are set to: rwxr-xr-x
+ */
+ @NonNull
+    public static final FileAttribute<Set<PosixFilePermission>> defaultPerms =
+ PosixFilePermissions.asFileAttribute(
+ Set.of(
+ PosixFilePermission.OWNER_READ,
+ PosixFilePermission.OWNER_WRITE,
+ PosixFilePermission.OWNER_EXECUTE,
+ PosixFilePermission.GROUP_READ,
+ PosixFilePermission.GROUP_EXECUTE,
+ PosixFilePermission.OTHERS_READ,
+ PosixFilePermission.OTHERS_EXECUTE));
+}
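A small sketch of applying defaultPerms when creating a storage directory (POSIX file systems only; the path and class name are illustrative):

    import static com.hedera.block.server.persistence.storage.Util.defaultPerms;

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class DefaultPermsSketch {
        public static void main(final String[] args) throws IOException {
            final Path root = Path.of("/tmp/block-node-storage"); // illustrative path
            if (Files.notExists(root)) {
                // Creates the directory with rwxr-xr-x on POSIX file systems.
                Files.createDirectory(root, defaultPerms);
            }
        }
    }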
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockAsDirReader.java b/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockAsDirReader.java
new file mode 100644
index 000000000..7c5c8f156
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockAsDirReader.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage.read;
+
+import static com.hedera.block.protos.BlockStreamService.Block;
+import static com.hedera.block.protos.BlockStreamService.Block.Builder;
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+import static com.hedera.block.server.Constants.BLOCK_FILE_EXTENSION;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.helidon.config.Config;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Optional;
+import java.util.Set;
+
+/**
+ * The BlockAsDirReader class reads a block from the file system. The block is stored as a directory
+ * containing block items. The block items are stored as files within the block directory.
+ */
+class BlockAsDirReader implements BlockReader<Block> {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+ private final Path blockNodeRootPath;
+    private final FileAttribute<Set<PosixFilePermission>> filePerms;
+
+ /**
+ * Constructor for the BlockAsDirReader class. It initializes the BlockAsDirReader with the
+ * given parameters.
+ *
+ * @param key the key to retrieve the block node root path from the configuration
+ * @param config the configuration to retrieve the block node root path
+ * @param filePerms the file permissions to set on the block node root path
+ */
+ BlockAsDirReader(
+ @NonNull final String key,
+ @NonNull final Config config,
+            @NonNull final FileAttribute<Set<PosixFilePermission>> filePerms) {
+
+        LOGGER.log(System.Logger.Level.INFO, "Initializing BlockAsDirReader");
+
+ @NonNull final Path blockNodeRootPath = Path.of(config.get(key).asString().get());
+
+ LOGGER.log(System.Logger.Level.INFO, config.toString());
+ LOGGER.log(System.Logger.Level.INFO, "Block Node Root Path: " + blockNodeRootPath);
+
+ this.blockNodeRootPath = blockNodeRootPath;
+ this.filePerms = filePerms;
+ }
+
+ /**
+ * Reads a block from the file system. The block is stored as a directory containing block
+ * items. The block items are stored as files within the block directory.
+ *
+ * @param blockNumber the block number to read
+ * @return an optional of the block read from the file system
+ * @throws IOException if an I/O error occurs
+ */
+ @NonNull
+ @Override
+    public Optional<Block> read(final long blockNumber) throws IOException {
+
+ // Verify path attributes of the block node root path
+ if (isPathDisqualified(blockNodeRootPath)) {
+ return Optional.empty();
+ }
+
+ // Verify path attributes of the block directory within the
+ // block node root path
+ @NonNull final Path blockPath = blockNodeRootPath.resolve(String.valueOf(blockNumber));
+ if (isPathDisqualified(blockPath)) {
+ return Optional.empty();
+ }
+
+ try {
+ // There may be thousands of BlockItem files in a Block directory.
+ // The BlockItems must be added to the outbound Block object in order.
+ // However, using something like DirectoryStream will iterate without
+ // any guaranteed order. To avoid sorting and to keep the retrieval
+ // process linear with the number of BlockItems in the Block, run a loop
+ // to fetch BlockItems in the expected order. For example, in a Block
+ // directory "1" containing 10 BlockItem files (1.blk, 2.blk, 3.blk, ...,
+ // 10.blk), the loop will directly fetch the BlockItems in order based on
+ // their file names. The loop will exit when it attempts to read a
+ // BlockItem file that does not exist (e.g., 11.blk).
+ @NonNull final Builder builder = Block.newBuilder();
+ for (int i = 1; ; i++) {
+ @NonNull final Path blockItemPath = blockPath.resolve(i + BLOCK_FILE_EXTENSION);
+ @NonNull
+                final Optional<BlockItem> blockItemOpt = readBlockItem(blockItemPath.toString());
+ if (blockItemOpt.isPresent()) {
+ builder.addBlockItems(blockItemOpt.get());
+ continue;
+ }
+
+ break;
+ }
+
+ // Return the Block
+ return Optional.of(builder.build());
+ } catch (IOException io) {
+ LOGGER.log(System.Logger.Level.ERROR, "Error reading block: " + blockPath, io);
+
+ throw io;
+ }
+ }
+
+ @NonNull
+    private Optional<BlockItem> readBlockItem(@NonNull final String blockItemPath)
+ throws IOException {
+
+ try (FileInputStream fis = new FileInputStream(blockItemPath)) {
+ return Optional.of(BlockItem.parseFrom(fis));
+ } catch (FileNotFoundException io) {
+ final File f = new File(blockItemPath);
+ if (!f.exists()) {
+ // The outer loop caller will continue to query
+ // for the next BlockItem file based on the index
+ // until the FileNotFoundException is thrown.
+ // It's expected that this exception will be caught
+ // at the end of every query.
+ return Optional.empty();
+ }
+
+ // FileNotFound is also thrown when a file cannot be read.
+ // So re-throw here to make a different decision upstream.
+ throw io;
+ }
+ }
+
+ private boolean isPathDisqualified(@NonNull final Path path) {
+
+ if (!path.toFile().exists()) {
+ // This code path gets hit if a consumer
+ // requests a block that does not exist.
+ // Only log this as a debug message.
+ LOGGER.log(System.Logger.Level.DEBUG, "Path not found: {0}", path);
+ return true;
+ }
+
+ if (!path.toFile().canRead()) {
+ LOGGER.log(System.Logger.Level.ERROR, "Path not readable: {0}", path);
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Attempting to repair the path permissions: {0}",
+ path);
+
+ try {
+ // If resetting the permissions fails or
+ // if the path is still unreadable, return true.
+ setPerm(path, filePerms.value());
+ if (!path.toFile().canRead()) {
+ return true;
+ }
+ } catch (IOException e) {
+                LOGGER.log(
+                        System.Logger.Level.ERROR, "Error setting permissions on: " + path, e);
+ return true;
+ }
+ }
+
+ if (!path.toFile().isDirectory()) {
+ LOGGER.log(System.Logger.Level.ERROR, "Path is not a directory: {0}", path);
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Sets the permissions on the given path. This method is protected to allow for testing.
+ *
+ * @param path the path to set the permissions on
+ * @param perms the permissions to set on the path
+ * @throws IOException if an I/O error occurs
+ */
+    protected void setPerm(@NonNull final Path path, @NonNull final Set<PosixFilePermission> perms)
+ throws IOException {
+ Files.setPosixFilePermissions(path, perms);
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockAsDirReaderBuilder.java b/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockAsDirReaderBuilder.java
new file mode 100644
index 000000000..c0a3f9d54
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockAsDirReaderBuilder.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage.read;
+
+import static com.hedera.block.protos.BlockStreamService.Block;
+
+import com.hedera.block.server.persistence.storage.Util;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.helidon.config.Config;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Set;
+
+/**
+ * Use builder methods to create a {@link BlockReader} to read blocks from storage.
+ *
+ * When a block reader is created, it will provide access to read blocks from storage.
+ */
+public class BlockAsDirReaderBuilder {
+
+ private final String key;
+ private final Config config;
+    private FileAttribute<Set<PosixFilePermission>> filePerms = Util.defaultPerms;
+
+ private BlockAsDirReaderBuilder(@NonNull final String key, @NonNull final Config config) {
+ this.key = key;
+ this.config = config;
+ }
+
+ /**
+ * Creates a new block reader builder using the minimum required parameters.
+ *
+ * @param key is required to read pertinent configuration info.
+ * @param config is required to supply pertinent configuration info for the block reader to
+ * access storage.
+ * @return a block reader builder configured with required parameters.
+ */
+ @NonNull
+ public static BlockAsDirReaderBuilder newBuilder(
+ @NonNull final String key, @NonNull final Config config) {
+ return new BlockAsDirReaderBuilder(key, config);
+ }
+
+ /**
+ * Optionally, provide file permissions for the block reader to use when managing block files
+ * and directories.
+ *
+ * By default, the block reader will use the permissions defined in {@link
+ * Util#defaultPerms}. This method is primarily used for testing purposes. Default values should
+ * be sufficient for production use.
+ *
+ * @param filePerms the file permissions to use when managing block files and directories.
+ * @return a block reader builder configured with required parameters.
+ */
+ @NonNull
+ public BlockAsDirReaderBuilder filePerms(
+            @NonNull final FileAttribute<Set<PosixFilePermission>> filePerms) {
+ this.filePerms = filePerms;
+ return this;
+ }
+
+ /**
+ * Use the build method to construct a block reader to read blocks from storage.
+ *
+ * @return a new block reader configured with the parameters provided to the builder.
+ */
+ @NonNull
+    public BlockReader<Block> build() {
+ return new BlockAsDirReader(key, config, filePerms);
+ }
+}
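A usage sketch for the reader builder; the Config instance is assumed to come from the server's existing configuration, BLOCKNODE_STORAGE_ROOT_PATH_KEY from Constants, and getBlockItemsCount() is the standard protobuf accessor generated for the repeated block_items field:

    final BlockReader<Block> blockReader =
            BlockAsDirReaderBuilder.newBuilder(BLOCKNODE_STORAGE_ROOT_PATH_KEY, config).build();

    // read(long) throws IOException and returns an empty Optional for unknown blocks.
    final Optional<Block> block = blockReader.read(1);
    block.ifPresent(b -> System.out.println("Block items read: " + b.getBlockItemsCount()));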
diff --git a/server/src/test/java/com/hedera/block/server/persistence/PersistTestUtils.java b/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockReader.java
similarity index 50%
rename from server/src/test/java/com/hedera/block/server/persistence/PersistTestUtils.java
rename to server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockReader.java
index 87e1e83b5..9f3442b4b 100644
--- a/server/src/test/java/com/hedera/block/server/persistence/PersistTestUtils.java
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/read/BlockReader.java
@@ -14,25 +14,26 @@
* limitations under the License.
*/
-package com.hedera.block.server.persistence;
+package com.hedera.block.server.persistence.storage.read;
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.io.IOException;
+import java.util.Optional;
-import java.util.List;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-public final class PersistTestUtils {
-
- private PersistTestUtils() {}
+/**
+ * The BlockReader interface defines the contract for reading a block from storage.
+ *
+ * @param <V> the type of the block to read
+ */
+public interface BlockReader<V> {
-    public static List<BlockStreamServiceGrpcProto.Block> generateBlocks(int numOfBlocks) {
- return IntStream
- .range(1, numOfBlocks + 1)
- .mapToObj(i -> BlockStreamServiceGrpcProto.Block
- .newBuilder()
- .setId(i)
- .setValue("block-node-" + i).build()
- )
- .collect(Collectors.toList());
- }}
+ /**
+ * Reads the block with the given block number.
+ *
+ * @param blockNumber the block number of the block to read
+ * @return the block with the given block number
+ * @throws IOException if an I/O error occurs fetching the block
+ */
+ @NonNull
+    Optional<V> read(final long blockNumber) throws IOException;
+}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/remove/BlockAsDirRemover.java b/server/src/main/java/com/hedera/block/server/persistence/storage/remove/BlockAsDirRemover.java
new file mode 100644
index 000000000..85d4c28fb
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/remove/BlockAsDirRemover.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage.remove;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Set;
+
+/**
+ * The BlockAsDirRemover class removes a block from the file system. The block is stored as a
+ * directory containing block items. The block items are stored as files within the block directory.
+ */
+public class BlockAsDirRemover implements BlockRemover {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+ private final Path blockNodeRootPath;
+    private final FileAttribute<Set<PosixFilePermission>> filePerms;
+
+ /**
+ * Create a block remover to manage removing blocks from storage.
+ *
+ * @param blockNodeRootPath the root path where blocks are stored.
+ * @param filePerms the file permissions used to manage removing blocks.
+ */
+ public BlockAsDirRemover(
+ @NonNull final Path blockNodeRootPath,
+            @NonNull final FileAttribute<Set<PosixFilePermission>> filePerms) {
+ this.blockNodeRootPath = blockNodeRootPath;
+ this.filePerms = filePerms;
+ }
+
+ /**
+ * Removes a block from the file system.
+ *
+ * @param id the id of the block to remove
+ * @throws IOException if an I/O error occurs
+ */
+ @Override
+ public void remove(final long id) throws IOException {
+
+ // Calculate the block path and proactively set the permissions
+ // for removal
+ @NonNull final Path blockPath = blockNodeRootPath.resolve(String.valueOf(id));
+ if (Files.notExists(blockPath)) {
+ LOGGER.log(System.Logger.Level.ERROR, "Block does not exist: {0}", id);
+ return;
+ }
+
+ Files.setPosixFilePermissions(blockPath, filePerms.value());
+
+ // Best effort to delete the block
+ if (!delete(blockPath.toFile())) {
+ LOGGER.log(System.Logger.Level.ERROR, "Failed to delete block: {0}", id);
+ }
+ }
+
+ private static boolean delete(@NonNull final File file) {
+
+ // Recursively delete the contents
+ // of the directory
+ if (file.isDirectory()) {
+ @Nullable final File[] files = file.listFiles();
+ if (files != null) {
+ for (@NonNull final File f : files) {
+ delete(f);
+ }
+ }
+ }
+
+ return file.delete();
+ }
+}
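A sketch of removing a partially written block; the root path is illustrative, Util.defaultPerms comes from the storage package above, and remove(long) may throw IOException:

    final BlockRemover blockRemover =
            new BlockAsDirRemover(Path.of("/opt/block-node/blocks"), Util.defaultPerms);

    // Deletes the directory named "42" and the .blk files inside it, if present.
    blockRemover.remove(42);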
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/remove/BlockRemover.java b/server/src/main/java/com/hedera/block/server/persistence/storage/remove/BlockRemover.java
new file mode 100644
index 000000000..bee39bed8
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/remove/BlockRemover.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage.remove;
+
+import java.io.IOException;
+
+/** The BlockRemover interface defines the contract for removing a block from storage. */
+public interface BlockRemover {
+
+ /**
+ * Remove a block with the given block number.
+ *
+ * @param blockNumber the block number of the block to remove.
+ * @throws IOException when failing to remove a block.
+ */
+ void remove(final long blockNumber) throws IOException;
+}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockAsDirWriter.java b/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockAsDirWriter.java
new file mode 100644
index 000000000..c04965454
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockAsDirWriter.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage.write;
+
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+import static com.hedera.block.server.Constants.BLOCKNODE_STORAGE_ROOT_PATH_KEY;
+import static com.hedera.block.server.Constants.BLOCK_FILE_EXTENSION;
+
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.metrics.MetricsService;
+import com.hedera.block.server.persistence.storage.remove.BlockRemover;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.helidon.config.Config;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Set;
+
+/**
+ * The BlockAsDirWriter class writes a block to the filesystem block item by block item. In this
+ * implementation, a block is represented as a directory of BlockItems. BlockAsDirWriter is stateful
+ * and uses the known, deterministic block item attributes to create new "blocks" (directories) and
+ * write block items to them. If an unexpected exception occurs during the write operation, the
+ * BlockAsDirWriter will first try to correct file permissions if appropriate. It will then attempt
+ * to remove the current, incomplete block (directory) before re-throwing the exception to the
+ * caller.
+ */
+class BlockAsDirWriter implements BlockWriter<BlockItem> {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+ private final Path blockNodeRootPath;
+ private long blockNodeFileNameIndex;
+ private Path currentBlockDir;
+    private final FileAttribute<Set<PosixFilePermission>> filePerms;
+ private final BlockRemover blockRemover;
+ private final BlockNodeContext blockNodeContext;
+
+ /**
+ * Constructor for the BlockAsDirWriter class. It initializes the BlockAsDirWriter with the
+ * given key, config, block remover, and file permissions.
+ *
+ * @param key the key to use to retrieve the block node root path from the config
+ * @param config the config to use to retrieve the block node root path
+ * @param blockRemover the block remover to use to remove blocks if there is an exception while
+ * writing a partial block
+ * @param filePerms the file permissions to set on the block node root path
+ * @throws IOException if an error occurs while initializing the BlockAsDirWriter
+ */
+ BlockAsDirWriter(
+ @NonNull final String key,
+ @NonNull final Config config,
+ @NonNull final BlockRemover blockRemover,
+            @NonNull final FileAttribute<Set<PosixFilePermission>> filePerms,
+ @NonNull final BlockNodeContext blockNodeContext)
+ throws IOException {
+
+        LOGGER.log(System.Logger.Level.INFO, "Initializing BlockAsDirWriter");
+
+ final Path blockNodeRootPath = Path.of(config.get(key).asString().get());
+
+ LOGGER.log(System.Logger.Level.INFO, config.toString());
+ LOGGER.log(System.Logger.Level.INFO, "Block Node Root Path: " + blockNodeRootPath);
+
+ this.blockNodeRootPath = blockNodeRootPath;
+ this.blockRemover = blockRemover;
+ this.filePerms = filePerms;
+
+ if (!blockNodeRootPath.isAbsolute()) {
+ throw new IllegalArgumentException(
+ BLOCKNODE_STORAGE_ROOT_PATH_KEY + " must be an absolute path");
+ }
+
+ // Initialize the block node root directory if it does not exist
+ createPath(blockNodeRootPath, System.Logger.Level.INFO);
+
+ this.blockNodeContext = blockNodeContext;
+ }
+
+ /**
+ * Writes the given block item to the filesystem.
+ *
+ * @param blockItem the block item to write
+ * @throws IOException if an error occurs while writing the block item
+ */
+ @Override
+ public void write(@NonNull final BlockItem blockItem) throws IOException {
+
+ if (blockItem.hasHeader()) {
+ resetState(blockItem);
+ }
+
+ @NonNull final Path blockItemFilePath = calculateBlockItemPath();
+ for (int retries = 0; ; retries++) {
+ try {
+ write(blockItemFilePath, blockItem);
+ break;
+ } catch (IOException e) {
+
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Error writing the BlockItem protobuf to a file: ",
+ e);
+
+ // Remove the block if repairing the permissions fails
+ if (retries > 0) {
+ // Attempt to remove the block
+ blockRemover.remove(Long.parseLong(currentBlockDir.toString()));
+ throw e;
+ } else {
+ // Attempt to repair the permissions on the block path
+ // and the blockItem path
+ repairPermissions(blockNodeRootPath);
+ repairPermissions(calculateBlockPath());
+ LOGGER.log(
+ System.Logger.Level.INFO,
+ "Retrying to write the BlockItem protobuf to a file");
+ }
+ }
+ }
+ }
+
+ /**
+ * Writes the given block item to the filesystem. This method is protected to allow for testing.
+ *
+ * @param blockItemFilePath the path to the block item file
+ * @param blockItem the block item to write
+ * @throws IOException if an error occurs while writing the block item
+ */
+ protected void write(@NonNull final Path blockItemFilePath, @NonNull final BlockItem blockItem)
+ throws IOException {
+ try (@NonNull
+ final FileOutputStream fos = new FileOutputStream(blockItemFilePath.toString())) {
+ blockItem.writeTo(fos);
+ LOGGER.log(
+ System.Logger.Level.INFO,
+ "Successfully wrote the block item file: {0}",
+ blockItemFilePath);
+ } catch (IOException e) {
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Error writing the BlockItem protobuf to a file: ",
+ e);
+ throw e;
+ }
+ }
+
+ private void resetState(@NonNull final BlockItem blockItem) throws IOException {
+ // Here a "block" is represented as a directory of BlockItems.
+ // Create the "block" directory based on the block_number
+ currentBlockDir = Path.of(String.valueOf(blockItem.getHeader().getBlockNumber()));
+
+ // Check the blockNodeRootPath permissions and
+ // attempt to repair them if possible
+ repairPermissions(blockNodeRootPath);
+
+ // Construct the path to the block directory
+ createPath(calculateBlockPath(), System.Logger.Level.DEBUG);
+
+ // Reset
+ blockNodeFileNameIndex = 0;
+
+ // Increment the block counter
+ @NonNull final MetricsService metricsService = blockNodeContext.metricsService();
+ metricsService.blocksPersisted.increment();
+ }
+
+ private void repairPermissions(@NonNull final Path path) throws IOException {
+ final boolean isWritable = Files.isWritable(path);
+ if (!isWritable) {
+            LOGGER.log(
+                    System.Logger.Level.ERROR,
+                    "The path " + path + " is not writable. Attempting to change the"
+                            + " permissions.");
+
+            try {
+                // Attempt to restore the permissions on the given path
+ Files.setPosixFilePermissions(path, filePerms.value());
+ } catch (IOException e) {
+ LOGGER.log(
+ System.Logger.Level.ERROR,
+ "Error setting permissions on the path: " + path,
+ e);
+ throw e;
+ }
+ }
+ }
+
+ @NonNull
+ private Path calculateBlockItemPath() {
+ // Build the path to a .blk file
+ @NonNull final Path blockPath = calculateBlockPath();
+ blockNodeFileNameIndex++;
+ return blockPath.resolve(blockNodeFileNameIndex + BLOCK_FILE_EXTENSION);
+ }
+
+ @NonNull
+ private Path calculateBlockPath() {
+ return blockNodeRootPath.resolve(currentBlockDir);
+ }
+
+ private void createPath(
+ @NonNull final Path blockNodePath, @NonNull final System.Logger.Level logLevel)
+ throws IOException {
+ // Initialize the Block directory if it does not exist
+ if (Files.notExists(blockNodePath)) {
+ Files.createDirectory(blockNodePath, filePerms);
+ LOGGER.log(logLevel, "Created block node root directory: " + blockNodePath);
+ } else {
+ LOGGER.log(logLevel, "Using existing block node root directory: " + blockNodePath);
+ }
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockAsDirWriterBuilder.java b/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockAsDirWriterBuilder.java
new file mode 100644
index 000000000..579e8d5e5
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockAsDirWriterBuilder.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.persistence.storage.write;
+
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.persistence.storage.Util;
+import com.hedera.block.server.persistence.storage.remove.BlockAsDirRemover;
+import com.hedera.block.server.persistence.storage.remove.BlockRemover;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.helidon.config.Config;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.Set;
+
+/**
+ * Use builder methods to create a {@link BlockWriter} to write blocks to storage.
+ *
+ * <p>When a block writer is created, it will provide access to write blocks to storage.
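+ *
+ * <p>A minimal usage sketch (all lower-case arguments below are placeholders supplied by the
+ * caller, not values defined by this builder):
+ *
+ * <pre>{@code
+ * BlockWriter<BlockItem> blockWriter =
+ *         BlockAsDirWriterBuilder.newBuilder(key, config, blockNodeContext)
+ *                 .filePerms(customPerms)       // optional, defaults to Util.defaultPerms
+ *                 .blockRemover(customRemover)  // optional, defaults to a BlockAsDirRemover
+ *                 .build();
+ * blockWriter.write(blockItem);
+ * }</pre>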
+ */
+public class BlockAsDirWriterBuilder {
+
+ private final String key;
+ private final Config config;
+ private final BlockNodeContext blockNodeContext;
+    private FileAttribute<Set<PosixFilePermission>> filePerms = Util.defaultPerms;
+ private BlockRemover blockRemover;
+
+ private BlockAsDirWriterBuilder(
+ @NonNull final String key,
+ @NonNull final Config config,
+ @NonNull final BlockNodeContext blockNodeContext) {
+ this.key = key;
+ this.config = config;
+ this.blockNodeContext = blockNodeContext;
+ this.blockRemover =
+ new BlockAsDirRemover(Path.of(config.get(key).asString().get()), Util.defaultPerms);
+ }
+
+ /**
+ * Creates a new block writer builder using the minimum required parameters.
+ *
+ * @param key is required to read pertinent configuration info.
+ * @param config is required to supply pertinent configuration info for the block writer to
+ * access storage.
+     * @param blockNodeContext is required to provide metrics reporting mechanisms.
+ * @return a block writer builder configured with required parameters.
+ */
+ @NonNull
+ public static BlockAsDirWriterBuilder newBuilder(
+ @NonNull final String key,
+ @NonNull final Config config,
+ @NonNull final BlockNodeContext blockNodeContext) {
+
+ return new BlockAsDirWriterBuilder(key, config, blockNodeContext);
+ }
+
+ /**
+ * Optionally, provide file permissions for the block writer to use when managing block files
+ * and directories.
+ *
+     * <p>By default, the block writer will use the permissions defined in {@link
+ * Util#defaultPerms}. This method is primarily used for testing purposes. Default values should
+ * be sufficient for production use.
+ *
+ * @param filePerms the file permissions to use when managing block files and directories.
+ * @return a block writer builder configured with required parameters.
+ */
+ @NonNull
+ public BlockAsDirWriterBuilder filePerms(
+            @NonNull FileAttribute<Set<PosixFilePermission>> filePerms) {
+ this.filePerms = filePerms;
+ return this;
+ }
+
+ /**
+ * Optionally, provide a block remover to remove blocks from storage.
+ *
+     * <p>By default, the block writer will use the block remover defined in {@link
+ * BlockAsDirRemover}. This method is primarily used for testing purposes. Default values should
+ * be sufficient for production use.
+ *
+ * @param blockRemover the block remover to use when removing blocks from storage.
+ * @return a block writer builder configured with required parameters.
+ */
+ @NonNull
+ public BlockAsDirWriterBuilder blockRemover(@NonNull BlockRemover blockRemover) {
+ this.blockRemover = blockRemover;
+ return this;
+ }
+
+ /**
+ * Use the build method to construct a block writer to write blocks to storage.
+ *
+ * @return a new block writer configured with the parameters provided to the builder.
+ * @throws IOException when an error occurs while persisting block items to storage.
+ */
+ @NonNull
+    public BlockWriter<BlockItem> build() throws IOException {
+ return new BlockAsDirWriter(key, config, blockRemover, filePerms, blockNodeContext);
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/persistence/storage/BlockStorage.java b/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockWriter.java
similarity index 52%
rename from server/src/main/java/com/hedera/block/server/persistence/storage/BlockStorage.java
rename to server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockWriter.java
index 7f42807d7..84baa9e18 100644
--- a/server/src/main/java/com/hedera/block/server/persistence/storage/BlockStorage.java
+++ b/server/src/main/java/com/hedera/block/server/persistence/storage/write/BlockWriter.java
@@ -14,30 +14,23 @@
* limitations under the License.
*/
-package com.hedera.block.server.persistence.storage;
+package com.hedera.block.server.persistence.storage.write;
-import java.util.Optional;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.io.IOException;
/**
- * The BlockStorage interface defines operations to write and read blocks to a persistent store.
+ * BlockWriter defines the contract for writing block items to storage.
*
- * @param <V> the type of block to store
+ * @param <V> the type of the block item to write
*/
-public interface BlockStorage<V> {
+public interface BlockWriter<V> {
/**
- * Writes a block to storage.
+ * Write the block item to storage.
*
- * @param block the block to write
- * @return the id of the block
+ * @param blockItem the block item to write to storage.
+ * @throws IOException when failing to write the block item to storage.
*/
-    Optional<Long> write(final V block);
-
- /**
- * Reads a block from storage.
- *
- * @param blockId the id of the block to read
- * @return the block
- */
-    Optional<V> read(final Long blockId);
+ void write(@NonNull final V blockItem) throws IOException;
}
diff --git a/server/src/main/java/com/hedera/block/server/producer/ItemAckBuilder.java b/server/src/main/java/com/hedera/block/server/producer/ItemAckBuilder.java
new file mode 100644
index 000000000..2e34dfd32
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/producer/ItemAckBuilder.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.producer;
+
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+import static com.hedera.block.protos.BlockStreamService.PublishStreamResponse.ItemAcknowledgement;
+import static com.hedera.block.server.producer.Util.getFakeHash;
+
+import com.google.protobuf.ByteString;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+
+/**
+ * The ItemAckBuilder class defines a simple item acknowledgement builder used to create an
+ * acknowledgement type response. This is a placeholder and should be replaced with real hash
+ * functionality once the hedera-protobufs types are integrated.
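+ *
+ * <p>A minimal usage sketch (the {@code blockItem} instance is assumed to be supplied by the
+ * caller):
+ *
+ * <pre>{@code
+ * ItemAcknowledgement ack = new ItemAckBuilder().buildAck(blockItem);
+ * }</pre>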
+ */
+public class ItemAckBuilder {
+
+ /** Constructor for the ItemAckBuilder class. */
+ public ItemAckBuilder() {}
+
+ /**
+ * Builds an item acknowledgement for the given block item.
+ *
+ * @param blockItem the block item to build the acknowledgement for
+ * @return the item acknowledgement for the given block item
+ * @throws IOException thrown if an I/O error occurs while building the acknowledgement
+ * @throws NoSuchAlgorithmException thrown if the SHA-384 algorithm is not available
+ */
+ @NonNull
+ public ItemAcknowledgement buildAck(@NonNull final BlockItem blockItem)
+ throws IOException, NoSuchAlgorithmException {
+ // TODO: Use real hash and real hedera-protobufs types
+ return ItemAcknowledgement.newBuilder()
+ .setItemAck(ByteString.copyFrom(getFakeHash(blockItem)))
+ .build();
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/producer/ProducerBlockItemObserver.java b/server/src/main/java/com/hedera/block/server/producer/ProducerBlockItemObserver.java
new file mode 100644
index 000000000..199110807
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/producer/ProducerBlockItemObserver.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.producer;
+
+import static com.hedera.block.protos.BlockStreamService.*;
+import static com.hedera.block.protos.BlockStreamService.PublishStreamResponse.*;
+
+import com.hedera.block.server.ServiceStatus;
+import com.hedera.block.server.mediator.Publisher;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.grpc.stub.StreamObserver;
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+
+/**
+ * The ProducerBlockItemObserver class plugs into Helidon's server-initiated bidirectional gRPC
+ * service implementation. Helidon calls methods on this class as networking events occur with the
+ * connection to the upstream producer (e.g. block items streamed from the Consensus Node to the
+ * server).
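+ *
+ * <p>A rough construction sketch (the collaborators below are assumed to be supplied by the
+ * enclosing gRPC service when a producer connects):
+ *
+ * <pre>{@code
+ * StreamObserver<PublishStreamRequest> observer =
+ *         new ProducerBlockItemObserver(
+ *                 publisher, publishStreamResponseObserver, new ItemAckBuilder(), serviceStatus);
+ * }</pre>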
+ */
+public class ProducerBlockItemObserver implements StreamObserver<PublishStreamRequest> {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+    private final StreamObserver<PublishStreamResponse> publishStreamResponseObserver;
+    private final Publisher<BlockItem> publisher;
+ private final ItemAckBuilder itemAckBuilder;
+ private final ServiceStatus serviceStatus;
+
+ /**
+     * Constructor for the ProducerBlockItemObserver class. It is responsible for passing block
+     * items to the Publisher as they arrive from the upstream producer and for sending responses
+     * back to the upstream producer via the publishStreamResponseObserver.
+ *
+     * @param publisher the block item publisher used to pass block items to consumers as they
+ * arrive from the upstream producer
+ * @param publishStreamResponseObserver the response stream observer to send responses back to
+ * the upstream producer for each block item processed
+ * @param itemAckBuilder the item acknowledgement builder to use when sending responses back to
+ * the upstream producer for each block item processed
+ * @param serviceStatus the service status used to determine if the downstream service is
+ * accepting block items. In the event of an unrecoverable exception, it will be used to
+ * stop the web server.
+ */
+ public ProducerBlockItemObserver(
+            @NonNull final Publisher<BlockItem> publisher,
+            @NonNull final StreamObserver<PublishStreamResponse> publishStreamResponseObserver,
+ @NonNull final ItemAckBuilder itemAckBuilder,
+ @NonNull final ServiceStatus serviceStatus) {
+
+ this.publisher = publisher;
+ this.publishStreamResponseObserver = publishStreamResponseObserver;
+ this.itemAckBuilder = itemAckBuilder;
+ this.serviceStatus = serviceStatus;
+ }
+
+ /**
+ * Helidon triggers this method when it receives a new PublishStreamRequest from the upstream
+     * producer. The method publishes the block item data to all subscribers via the Publisher and
+ * sends a response back to the upstream producer.
+ *
+ * @param publishStreamRequest the PublishStreamRequest received from the upstream producer
+ */
+ @Override
+ public void onNext(@NonNull final PublishStreamRequest publishStreamRequest) {
+
+ @NonNull final BlockItem blockItem = publishStreamRequest.getBlockItem();
+
+ try {
+ // Publish the block to all the subscribers unless
+ // there's an issue with the StreamMediator.
+ if (serviceStatus.isRunning()) {
+
+ // Publish the block to the mediator
+ publisher.publish(blockItem);
+
+ try {
+ // Send a successful response
+ publishStreamResponseObserver.onNext(buildSuccessStreamResponse(blockItem));
+
+ } catch (IOException | NoSuchAlgorithmException e) {
+ @NonNull final var errorResponse = buildErrorStreamResponse();
+ publishStreamResponseObserver.onNext(errorResponse);
+ LOGGER.log(System.Logger.Level.ERROR, "Error calculating hash: ", e);
+ }
+
+ } else {
+ // Close the upstream connection to the producer(s)
+ @NonNull final var errorResponse = buildErrorStreamResponse();
+ publishStreamResponseObserver.onNext(errorResponse);
+ LOGGER.log(System.Logger.Level.DEBUG, "StreamMediator is not accepting BlockItems");
+ }
+ } catch (IOException io) {
+ @NonNull final var errorResponse = buildErrorStreamResponse();
+ publishStreamResponseObserver.onNext(errorResponse);
+ LOGGER.log(System.Logger.Level.ERROR, "Exception thrown publishing BlockItem: ", io);
+
+ LOGGER.log(System.Logger.Level.ERROR, "Shutting down the web server");
+ serviceStatus.stopWebServer();
+ }
+ }
+
+ @NonNull
+ private PublishStreamResponse buildSuccessStreamResponse(@NonNull final BlockItem blockItem)
+ throws IOException, NoSuchAlgorithmException {
+ @NonNull final ItemAcknowledgement itemAck = itemAckBuilder.buildAck(blockItem);
+ return PublishStreamResponse.newBuilder().setAcknowledgement(itemAck).build();
+ }
+
+ @NonNull
+ private static PublishStreamResponse buildErrorStreamResponse() {
+ // TODO: Replace this with a real error enum.
+ @NonNull
+ final EndOfStream endOfStream =
+ EndOfStream.newBuilder()
+ .setStatus(PublishStreamResponseCode.STREAM_ITEMS_UNKNOWN)
+ .build();
+ return PublishStreamResponse.newBuilder().setStatus(endOfStream).build();
+ }
+
+ /**
+ * Helidon triggers this method when an error occurs on the bidirectional stream to the upstream
+ * producer.
+ *
+ * @param t the error occurred on the stream
+ */
+ @Override
+ public void onError(@NonNull final Throwable t) {
+ LOGGER.log(System.Logger.Level.ERROR, "onError method invoked with an exception: ", t);
+ publishStreamResponseObserver.onError(t);
+ }
+
+ /**
+ * Helidon triggers this method when the bidirectional stream to the upstream producer is
+     * completed. It forwards the completion signal to the response observer.
+ */
+ @Override
+ public void onCompleted() {
+        LOGGER.log(System.Logger.Level.DEBUG, "ProducerBlockItemObserver completed");
+ publishStreamResponseObserver.onCompleted();
+ }
+}
diff --git a/server/src/main/java/com/hedera/block/server/producer/ProducerBlockStreamObserver.java b/server/src/main/java/com/hedera/block/server/producer/ProducerBlockStreamObserver.java
deleted file mode 100644
index 5691cc24e..000000000
--- a/server/src/main/java/com/hedera/block/server/producer/ProducerBlockStreamObserver.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.producer;
-
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.mediator.StreamMediator;
-import io.grpc.stub.StreamObserver;
-
-/**
- * The ProducerBlockStreamObserver class plugs into Helidon's server-initiated bidirectional
- * gRPC service implementation. Helidon calls methods on this class as networking events occur
- * with the connection to the upstream producer (e.g. blocks streamed from the Consensus Node to
- * the server).
- */
-public class ProducerBlockStreamObserver implements StreamObserver<BlockStreamServiceGrpcProto.Block> {
-
- private final System.Logger LOGGER = System.getLogger(getClass().getName());
-
-    private final StreamMediator<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> streamMediator;
-    private final StreamObserver<BlockStreamServiceGrpcProto.BlockResponse> responseStreamObserver;
-
- /**
- * Constructor for the ProducerBlockStreamObserver class. It is responsible for calling the mediator with blocks
- * as they arrive from the upstream producer. It also sends responses back to the upstream producer via the
- * responseStreamObserver.
- *
- * @param streamMediator the stream mediator
- * @param responseStreamObserver the response stream observer
- */
-    public ProducerBlockStreamObserver(final StreamMediator<BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse> streamMediator,
-                                       final StreamObserver<BlockStreamServiceGrpcProto.BlockResponse> responseStreamObserver) {
- this.streamMediator = streamMediator;
- this.responseStreamObserver = responseStreamObserver;
- }
-
- /**
- * Helidon triggers this method when it receives a new block from the upstream producer. The method notifies all
- * the mediator subscribers and sends a response back to the upstream producer.
- *
- * @param block the block streamed from the upstream producer
- */
- @Override
- public void onNext(final BlockStreamServiceGrpcProto.Block block) {
-
- // Notify all the mediator subscribers
- streamMediator.notifyAll(block);
-
- // Send a response back to the upstream producer
- final BlockStreamServiceGrpcProto.BlockResponse blockResponse = BlockStreamServiceGrpcProto.BlockResponse.newBuilder().setId(block.getId()).build();
- responseStreamObserver.onNext(blockResponse);
- }
-
- /**
- * Helidon triggers this method when an error occurs on the bidirectional stream to the upstream producer.
- *
- * @param t the error occurred on the stream
- */
- @Override
- public void onError(final Throwable t) {
- LOGGER.log(System.Logger.Level.ERROR, "onError method invoked with an exception", t);
- responseStreamObserver.onError(t);
- }
-
- /**
- * Helidon triggers this method when the bidirectional stream to the upstream producer is completed.
- * Unsubscribe all the observers from the mediator.
- */
- @Override
- public void onCompleted() {
- LOGGER.log(System.Logger.Level.DEBUG, "ProducerBlockStreamObserver completed");
- responseStreamObserver.onCompleted();
- }
-}
diff --git a/server/src/main/java/com/hedera/block/server/producer/Util.java b/server/src/main/java/com/hedera/block/server/producer/Util.java
new file mode 100644
index 000000000..2ca685590
--- /dev/null
+++ b/server/src/main/java/com/hedera/block/server/producer/Util.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.producer;
+
+import static com.hedera.block.protos.BlockStreamService.BlockItem;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+/** Utility class for the BlockNode service. */
+public final class Util {
+ private Util() {}
+
+ /**
+ * Gets a fake hash for the given block item. This is a placeholder and should be replaced with
+ * real hash functionality once the hedera-protobufs types are integrated.
+ *
+ * @param blockItem the block item to get the fake hash for
+ * @return the fake hash for the given block item
+ * @throws IOException thrown if an I/O error occurs while getting the fake hash
+ * @throws NoSuchAlgorithmException thrown if the SHA-384 algorithm is not available
+ */
+ public static byte[] getFakeHash(BlockItem blockItem)
+ throws IOException, NoSuchAlgorithmException {
+
+ try (final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ final ObjectOutputStream objectOutputStream =
+ new ObjectOutputStream(byteArrayOutputStream)) {
+ objectOutputStream.writeObject(blockItem);
+
+ // Get the serialized bytes
+ byte[] serializedObject = byteArrayOutputStream.toByteArray();
+
+ // Calculate the SHA-384 hash
+ MessageDigest digest = MessageDigest.getInstance("SHA-384");
+ return digest.digest(serializedObject);
+ }
+ }
+}
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index fb6f62f67..c78b82458 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -4,6 +4,7 @@
module com.hedera.block.server {
requires com.hedera.block.protos;
requires com.google.protobuf;
+ requires com.lmax.disruptor;
requires com.swirlds.common;
requires com.swirlds.config.api;
requires com.swirlds.config.extensions;
diff --git a/server/src/test/java/com/hedera/block/server/BlockStreamServiceIT.java b/server/src/test/java/com/hedera/block/server/BlockStreamServiceIT.java
new file mode 100644
index 000000000..e867c5f92
--- /dev/null
+++ b/server/src/test/java/com/hedera/block/server/BlockStreamServiceIT.java
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server;
+
+import static com.hedera.block.protos.BlockStreamService.*;
+import static com.hedera.block.protos.BlockStreamService.PublishStreamResponse.*;
+import static com.hedera.block.server.util.PersistTestUtils.generateBlockItems;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.config.BlockNodeContextFactory;
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.mediator.LiveStreamMediatorBuilder;
+import com.hedera.block.server.mediator.StreamMediator;
+import com.hedera.block.server.persistence.storage.Util;
+import com.hedera.block.server.persistence.storage.read.BlockAsDirReaderBuilder;
+import com.hedera.block.server.persistence.storage.read.BlockReader;
+import com.hedera.block.server.persistence.storage.remove.BlockAsDirRemover;
+import com.hedera.block.server.persistence.storage.remove.BlockRemover;
+import com.hedera.block.server.persistence.storage.write.BlockAsDirWriterBuilder;
+import com.hedera.block.server.persistence.storage.write.BlockWriter;
+import com.hedera.block.server.producer.ItemAckBuilder;
+import com.hedera.block.server.util.TestUtils;
+import com.lmax.disruptor.BatchEventProcessor;
+import com.lmax.disruptor.EventHandler;
+import io.grpc.stub.StreamObserver;
+import io.helidon.config.Config;
+import io.helidon.config.MapConfigSource;
+import io.helidon.config.spi.ConfigSource;
+import io.helidon.webserver.WebServer;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.security.NoSuchAlgorithmException;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+public class BlockStreamServiceIT {
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+    @Mock private StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator;
+
+    @Mock private StreamObserver<PublishStreamResponse> publishStreamResponseObserver;
+    @Mock private StreamObserver<SingleBlockResponse> singleBlockResponseStreamObserver;
+
+ @Mock private SubscribeStreamRequest subscribeStreamRequest;
+
+    @Mock private StreamObserver<SubscribeStreamResponse> subscribeStreamObserver1;
+    @Mock private StreamObserver<SubscribeStreamResponse> subscribeStreamObserver2;
+    @Mock private StreamObserver<SubscribeStreamResponse> subscribeStreamObserver3;
+
+    @Mock private StreamObserver<SubscribeStreamResponse> subscribeStreamObserver4;
+    @Mock private StreamObserver<SubscribeStreamResponse> subscribeStreamObserver5;
+    @Mock private StreamObserver<SubscribeStreamResponse> subscribeStreamObserver6;
+
+ @Mock private WebServer webServer;
+ @Mock private ServiceStatus serviceStatus;
+
+    @Mock private BlockReader<Block> blockReader;
+    @Mock private BlockWriter<BlockItem> blockWriter;
+
+ private static final String TEMP_DIR = "block-node-unit-test-dir";
+ private static final String JUNIT = "my-junit-test";
+
+ private Path testPath;
+ private Config testConfig;
+
+ private static final int testTimeout = 200;
+
+ @BeforeEach
+ public void setUp() throws IOException {
+ testPath = Files.createTempDirectory(TEMP_DIR);
+ LOGGER.log(System.Logger.Level.INFO, "Created temp directory: " + testPath.toString());
+
+        Map<String, String> testProperties = Map.of(JUNIT, testPath.toString());
+ ConfigSource testConfigSource = MapConfigSource.builder().map(testProperties).build();
+ testConfig = Config.builder(testConfigSource).build();
+ }
+
+ @AfterEach
+ public void tearDown() {
+ TestUtils.deleteDirectory(testPath.toFile());
+ }
+
+ @Test
+ public void testPublishBlockStreamRegistrationAndExecution()
+ throws IOException, NoSuchAlgorithmException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ 1500L,
+ new ItemAckBuilder(),
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Enable the serviceStatus
+ when(serviceStatus.isRunning()).thenReturn(true);
+
+        final StreamObserver<PublishStreamRequest> streamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+
+ final BlockItem blockItem = generateBlockItems(1).getFirst();
+ final PublishStreamRequest publishStreamRequest =
+ PublishStreamRequest.newBuilder().setBlockItem(blockItem).build();
+
+ // Calling onNext() as Helidon will
+ streamObserver.onNext(publishStreamRequest);
+
+ final ItemAcknowledgement itemAck = new ItemAckBuilder().buildAck(blockItem);
+ final PublishStreamResponse publishStreamResponse =
+ PublishStreamResponse.newBuilder().setAcknowledgement(itemAck).build();
+
+ // Verify the BlockItem message is sent to the mediator
+ verify(streamMediator, timeout(testTimeout).times(1)).publish(blockItem);
+
+ // Verify our custom StreamObserver implementation builds and sends
+ // a response back to the producer
+ verify(publishStreamResponseObserver, timeout(testTimeout).times(1))
+ .onNext(publishStreamResponse);
+
+ // Close the stream as Helidon does
+ streamObserver.onCompleted();
+
+ // verify the onCompleted() method is invoked on the wrapped StreamObserver
+ verify(publishStreamResponseObserver, timeout(testTimeout).times(1)).onCompleted();
+ }
+
+ @Test
+ public void testSubscribeBlockStream() throws IOException {
+
+ final ServiceStatus serviceStatus = new ServiceStatusImpl();
+ serviceStatus.setWebServer(webServer);
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(blockWriter, blockNodeContext, serviceStatus)
+ .build();
+
+ // Build the BlockStreamService
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ 2000L,
+ new ItemAckBuilder(),
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Subscribe the consumers
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver1);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver2);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver3);
+
+ // Subscribe the producer
+        final StreamObserver<PublishStreamRequest> streamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+
+ // Build the BlockItem
+        final List<BlockItem> blockItems = generateBlockItems(1);
+ final PublishStreamRequest publishStreamRequest =
+ PublishStreamRequest.newBuilder().setBlockItem(blockItems.getFirst()).build();
+
+ // Calling onNext() with a BlockItem
+ streamObserver.onNext(publishStreamRequest);
+
+ // Verify the counter was incremented
+ assertEquals(1, blockNodeContext.metricsService().liveBlockItems.get());
+
+ verify(blockWriter, timeout(testTimeout).times(1)).write(blockItems.getFirst());
+
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItems.getFirst()).build();
+
+ verify(subscribeStreamObserver1, timeout(testTimeout).times(1))
+ .onNext(subscribeStreamResponse);
+ verify(subscribeStreamObserver2, timeout(testTimeout).times(1))
+ .onNext(subscribeStreamResponse);
+ verify(subscribeStreamObserver3, timeout(testTimeout).times(1))
+ .onNext(subscribeStreamResponse);
+ }
+
+ @Test
+ public void testFullHappyPath() throws IOException {
+ int numberOfBlocks = 100;
+
+ final BlockStreamService blockStreamService = buildBlockStreamService();
+
+ // Enable the serviceStatus
+ when(serviceStatus.isRunning()).thenReturn(true);
+
+ // Pass a StreamObserver to the producer as Helidon does
+        final StreamObserver<PublishStreamRequest> streamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+
+        final List<BlockItem> blockItems = generateBlockItems(numberOfBlocks);
+
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver1);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver2);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver3);
+
+ for (BlockItem blockItem : blockItems) {
+ final PublishStreamRequest publishStreamRequest =
+ PublishStreamRequest.newBuilder().setBlockItem(blockItem).build();
+ streamObserver.onNext(publishStreamRequest);
+ }
+
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 0, numberOfBlocks, subscribeStreamObserver1, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 0, numberOfBlocks, subscribeStreamObserver2, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 0, numberOfBlocks, subscribeStreamObserver3, blockItems);
+
+ streamObserver.onCompleted();
+ }
+
+ @Test
+ public void testFullWithSubscribersAddedDynamically() throws IOException {
+
+ int numberOfBlocks = 100;
+
+ final BlockStreamService blockStreamService = buildBlockStreamService();
+
+ // Enable the serviceStatus
+ when(serviceStatus.isRunning()).thenReturn(true);
+
+ // Pass a StreamObserver to the producer as Helidon does
+        final StreamObserver<PublishStreamRequest> streamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+
+        final List<BlockItem> blockItems = generateBlockItems(numberOfBlocks);
+
+ // Subscribe the initial consumers
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver1);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver2);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver3);
+
+ for (int i = 0; i < blockItems.size(); i++) {
+ final PublishStreamRequest publishStreamRequest =
+ PublishStreamRequest.newBuilder().setBlockItem(blockItems.get(i)).build();
+
+ // Add a new subscriber
+ if (i == 51) {
+ blockStreamService.subscribeBlockStream(
+ subscribeStreamRequest, subscribeStreamObserver4);
+ }
+
+ // Transmit the BlockItem
+ streamObserver.onNext(publishStreamRequest);
+
+ // Add a new subscriber
+ if (i == 76) {
+ blockStreamService.subscribeBlockStream(
+ subscribeStreamRequest, subscribeStreamObserver5);
+ }
+
+ // Add a new subscriber
+ if (i == 88) {
+ blockStreamService.subscribeBlockStream(
+ subscribeStreamRequest, subscribeStreamObserver6);
+ }
+ }
+
+ // Verify subscribers who were listening before the stream started
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 0, numberOfBlocks, subscribeStreamObserver1, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 0, numberOfBlocks, subscribeStreamObserver2, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 0, numberOfBlocks, subscribeStreamObserver3, blockItems);
+
+ // Verify subscribers added while the stream was in progress.
+ // The Helidon-provided StreamObserver onNext() method will only
+ // be called once a Header BlockItem is reached. So, pass in
+ // the number of BlockItems to wait to verify that the method
+ // was called.
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 59, numberOfBlocks, subscribeStreamObserver4, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 79, numberOfBlocks, subscribeStreamObserver5, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 89, numberOfBlocks, subscribeStreamObserver6, blockItems);
+
+ streamObserver.onCompleted();
+ }
+
+ @Test
+ public void testSubAndUnsubWhileStreaming() throws IOException {
+
+ int numberOfBlocks = 100;
+
+        final LinkedHashMap<
+                        EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                        BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+                subscribers = new LinkedHashMap<>();
+ final var streamMediator = buildStreamMediator(subscribers, Util.defaultPerms);
+ final var blockStreamService =
+ buildBlockStreamService(streamMediator, blockReader, serviceStatus);
+
+ // Enable the serviceStatus
+ when(serviceStatus.isRunning()).thenReturn(true);
+
+ // Pass a StreamObserver to the producer as Helidon does
+        final StreamObserver<PublishStreamRequest> streamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+
+        final List<BlockItem> blockItems = generateBlockItems(numberOfBlocks);
+
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver1);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver2);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver3);
+
+ for (int i = 0; i < blockItems.size(); i++) {
+ final PublishStreamRequest publishStreamRequest =
+ PublishStreamRequest.newBuilder().setBlockItem(blockItems.get(i)).build();
+
+ // Remove a subscriber
+ if (i == 10) {
+ final var k = subscribers.firstEntry().getKey();
+ streamMediator.unsubscribe(k);
+ }
+
+ if (i == 60) {
+ final var k = subscribers.firstEntry().getKey();
+ streamMediator.unsubscribe(k);
+ }
+
+ // Add a new subscriber
+ if (i == 51) {
+ blockStreamService.subscribeBlockStream(
+ subscribeStreamRequest, subscribeStreamObserver4);
+ }
+
+ // Transmit the BlockItem
+ streamObserver.onNext(publishStreamRequest);
+
+ if (i == 70) {
+ final var k = subscribers.firstEntry().getKey();
+ streamMediator.unsubscribe(k);
+ }
+
+ // Add a new subscriber
+ if (i == 76) {
+ blockStreamService.subscribeBlockStream(
+ subscribeStreamRequest, subscribeStreamObserver5);
+ }
+
+ // Add a new subscriber
+ if (i == 88) {
+ blockStreamService.subscribeBlockStream(
+ subscribeStreamRequest, subscribeStreamObserver6);
+ }
+ }
+
+ // Verify subscribers who were listening before the stream started
+ verifySubscribeStreamResponse(numberOfBlocks, 0, 10, subscribeStreamObserver1, blockItems);
+ verifySubscribeStreamResponse(numberOfBlocks, 0, 60, subscribeStreamObserver2, blockItems);
+ verifySubscribeStreamResponse(numberOfBlocks, 0, 70, subscribeStreamObserver3, blockItems);
+
+ // Verify subscribers added while the stream was in progress.
+ // The Helidon-provided StreamObserver onNext() method will only
+ // be called once a Header BlockItem is reached. So, pass in
+ // the number of BlockItems to wait to verify that the method
+ // was called.
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 59, numberOfBlocks, subscribeStreamObserver4, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 79, numberOfBlocks, subscribeStreamObserver5, blockItems);
+ verifySubscribeStreamResponse(
+ numberOfBlocks, 89, numberOfBlocks, subscribeStreamObserver6, blockItems);
+
+ streamObserver.onCompleted();
+ }
+
+ @Test
+ public void testMediatorExceptionHandlingWhenPersistenceFailure() throws IOException {
+        final ConcurrentHashMap<
+                        EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                        BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+                subscribers = new ConcurrentHashMap<>();
+
+ // Initialize the underlying BlockReader and BlockWriter with ineffective
+ // permissions to repair the file system. The BlockWriter will not be able
+        // to write the BlockItem or fix the permissions, causing the BlockWriter to
+        // throw an IOException.
+ final ServiceStatus serviceStatus = new ServiceStatusImpl();
+ serviceStatus.setWebServer(webServer);
+
+ final var streamMediator = buildStreamMediator(subscribers, TestUtils.getNoPerms());
+ final var blockStreamService =
+ buildBlockStreamService(streamMediator, blockReader, serviceStatus);
+
+ // Subscribe the consumers
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver1);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver2);
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver3);
+
+ // Initialize the producer
+        final StreamObserver<PublishStreamRequest> streamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+
+ // Change the permissions on the file system to trigger an
+        // IOException when the BlockWriter tries to write
+ // the first BlockItem to the file system.
+ removeRootPathWritePerms(testConfig);
+
+ // Transmit a BlockItem
+        final List<BlockItem> blockItems = generateBlockItems(1);
+ final PublishStreamRequest publishStreamRequest =
+ PublishStreamRequest.newBuilder().setBlockItem(blockItems.getFirst()).build();
+ streamObserver.onNext(publishStreamRequest);
+
+ // Simulate another producer attempting to connect to the Block Node after the exception.
+ // Later, verify they received a response indicating the stream is closed.
+        final StreamObserver<PublishStreamRequest> expectedNoOpStreamObserver =
+ blockStreamService.publishBlockStream(publishStreamResponseObserver);
+ expectedNoOpStreamObserver.onNext(publishStreamRequest);
+
+ // Build a request to invoke the singleBlock service
+ final SingleBlockRequest singleBlockRequest =
+ SingleBlockRequest.newBuilder().setBlockNumber(1).build();
+ // Simulate a consumer attempting to connect to the Block Node after the exception.
+ blockStreamService.singleBlock(singleBlockRequest, singleBlockResponseStreamObserver);
+
+ // Build a request to invoke the subscribeBlockStream service
+ final SubscribeStreamRequest subscribeStreamRequest =
+ SubscribeStreamRequest.newBuilder().setStartBlockNumber(1).build();
+ // Simulate a consumer attempting to connect to the Block Node after the exception.
+ blockStreamService.subscribeBlockStream(subscribeStreamRequest, subscribeStreamObserver4);
+
+ // The BlockItem passed through since it was published
+ // before the IOException was thrown.
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItems.getFirst()).build();
+ verify(subscribeStreamObserver1, timeout(testTimeout).times(1))
+ .onNext(subscribeStreamResponse);
+ verify(subscribeStreamObserver2, timeout(testTimeout).times(1))
+ .onNext(subscribeStreamResponse);
+ verify(subscribeStreamObserver3, timeout(testTimeout).times(1))
+ .onNext(subscribeStreamResponse);
+
+ // Verify all the consumers received the end of stream response
+ // TODO: Fix the response code when it's available
+ final SubscribeStreamResponse endStreamResponse =
+ SubscribeStreamResponse.newBuilder()
+ .setStatus(
+ SubscribeStreamResponse.SubscribeStreamResponseCode
+ .READ_STREAM_SUCCESS)
+ .build();
+ verify(subscribeStreamObserver1, timeout(testTimeout).times(1)).onNext(endStreamResponse);
+ verify(subscribeStreamObserver2, timeout(testTimeout).times(1)).onNext(endStreamResponse);
+ verify(subscribeStreamObserver3, timeout(testTimeout).times(1)).onNext(endStreamResponse);
+
+ // Verify all the consumers were unsubscribed
+ for (final var s : subscribers.keySet()) {
+ assertFalse(streamMediator.isSubscribed(s));
+ }
+
+ // Verify the publishBlockStream service returned the expected
+ // error code indicating the service is not available.
+ final EndOfStream endOfStream =
+ EndOfStream.newBuilder()
+ .setStatus(PublishStreamResponseCode.STREAM_ITEMS_UNKNOWN)
+ .build();
+ final var endOfStreamResponse =
+ PublishStreamResponse.newBuilder().setStatus(endOfStream).build();
+ verify(publishStreamResponseObserver, timeout(testTimeout).times(2))
+ .onNext(endOfStreamResponse);
+ verify(webServer, timeout(testTimeout).times(1)).stop();
+
+ // Now verify the block was removed from the file system.
+        final BlockReader<Block> blockReader =
+ BlockAsDirReaderBuilder.newBuilder(JUNIT, testConfig).build();
+        final Optional<Block> blockOpt = blockReader.read(1);
+ assertTrue(blockOpt.isEmpty());
+
+ // Verify the singleBlock service returned the expected
+ // error code indicating the service is not available.
+ final SingleBlockResponse expectedSingleBlockNotAvailable =
+ SingleBlockResponse.newBuilder()
+ .setStatus(
+ SingleBlockResponse.SingleBlockResponseCode
+ .READ_BLOCK_NOT_AVAILABLE)
+ .build();
+ verify(singleBlockResponseStreamObserver, timeout(testTimeout).times(1))
+ .onNext(expectedSingleBlockNotAvailable);
+
+ // TODO: Fix the response code when it's available
+ final SubscribeStreamResponse expectedSubscriberStreamNotAvailable =
+ SubscribeStreamResponse.newBuilder()
+ .setStatus(
+ SubscribeStreamResponse.SubscribeStreamResponseCode
+ .READ_STREAM_SUCCESS)
+ .build();
+ verify(subscribeStreamObserver4, timeout(testTimeout).times(1))
+ .onNext(expectedSubscriberStreamNotAvailable);
+ }
+
+ private void removeRootPathWritePerms(final Config config) throws IOException {
+ final Path blockNodeRootPath = Path.of(config.get(JUNIT).asString().get());
+ Files.setPosixFilePermissions(blockNodeRootPath, TestUtils.getNoWrite().value());
+ }
+
+ private static void verifySubscribeStreamResponse(
+ int numberOfBlocks,
+ int blockItemsToWait,
+ int blockItemsToSkip,
+            StreamObserver<SubscribeStreamResponse> streamObserver,
+            List<BlockItem> blockItems) {
+
+ // Each block has 10 BlockItems. Verify all the BlockItems
+ // in a given block per iteration.
+ for (int block = 0; block < numberOfBlocks; block += 10) {
+
+ if (block < blockItemsToWait || block >= blockItemsToSkip) {
+ continue;
+ }
+
+ final BlockItem headerBlockItem = blockItems.get(block);
+ final SubscribeStreamResponse headerSubStreamResponse =
+ buildSubscribeStreamResponse(headerBlockItem);
+
+ final BlockItem bodyBlockItem = blockItems.get(block + 1);
+ final SubscribeStreamResponse bodySubStreamResponse =
+ buildSubscribeStreamResponse(bodyBlockItem);
+
+ final BlockItem stateProofBlockItem = blockItems.get(block + 9);
+ final SubscribeStreamResponse stateProofStreamResponse =
+ buildSubscribeStreamResponse(stateProofBlockItem);
+
+ verify(streamObserver, timeout(testTimeout).times(1)).onNext(headerSubStreamResponse);
+ verify(streamObserver, timeout(testTimeout).times(8)).onNext(bodySubStreamResponse);
+ verify(streamObserver, timeout(testTimeout).times(1)).onNext(stateProofStreamResponse);
+ }
+ }
+
+ private static SubscribeStreamResponse buildSubscribeStreamResponse(BlockItem blockItem) {
+ return SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
+ }
+
+ private BlockStreamService buildBlockStreamService() throws IOException {
+ final var streamMediator =
+ buildStreamMediator(new ConcurrentHashMap<>(32), Util.defaultPerms);
+
+ return buildBlockStreamService(streamMediator, blockReader, serviceStatus);
+ }
+
+    private StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> buildStreamMediator(
+            final Map<
+                            EventHandler<ObjectEvent<SubscribeStreamResponse>>,
+                            BatchEventProcessor<ObjectEvent<SubscribeStreamResponse>>>
+                    subscribers,
+            final FileAttribute<Set<PosixFilePermission>> filePerms)
+ throws IOException {
+
+        // Initialize with a concrete BlockRemover, BlockWriter and Mediator
+ final BlockRemover blockRemover =
+ new BlockAsDirRemover(
+ Path.of(testConfig.get(JUNIT).asString().get()), Util.defaultPerms);
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+        final BlockWriter<BlockItem> blockWriter =
+ BlockAsDirWriterBuilder.newBuilder(JUNIT, testConfig, blockNodeContext)
+ .blockRemover(blockRemover)
+ .filePerms(filePerms)
+ .build();
+
+ final ServiceStatus serviceStatus = new ServiceStatusImpl();
+ serviceStatus.setWebServer(webServer);
+
+ return LiveStreamMediatorBuilder.newBuilder(blockWriter, blockNodeContext, serviceStatus)
+ .subscribers(subscribers)
+ .build();
+ }
+
+ private BlockStreamService buildBlockStreamService(
+            final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator,
+            final BlockReader<Block> blockReader,
+ final ServiceStatus serviceStatus)
+ throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ return new BlockStreamService(
+ 2000,
+ new ItemAckBuilder(),
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+ }
+}
diff --git a/server/src/test/java/com/hedera/block/server/BlockStreamServiceTest.java b/server/src/test/java/com/hedera/block/server/BlockStreamServiceTest.java
index fffb58847..277185764 100644
--- a/server/src/test/java/com/hedera/block/server/BlockStreamServiceTest.java
+++ b/server/src/test/java/com/hedera/block/server/BlockStreamServiceTest.java
@@ -16,59 +16,278 @@
package com.hedera.block.server;
-import static org.junit.jupiter.api.Assertions.*;
+import static com.hedera.block.protos.BlockStreamService.*;
+import static com.hedera.block.server.BlockStreamService.buildSingleBlockNotAvailableResponse;
+import static com.hedera.block.server.BlockStreamService.buildSingleBlockNotFoundResponse;
+import static com.hedera.block.server.Constants.*;
+import static com.hedera.block.server.util.PersistTestUtils.generateBlockItems;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
+import com.google.protobuf.Descriptors;
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.config.BlockNodeContextFactory;
+import com.hedera.block.server.data.ObjectEvent;
import com.hedera.block.server.mediator.StreamMediator;
-import com.hedera.block.server.persistence.BlockPersistenceHandler;
+import com.hedera.block.server.persistence.storage.read.BlockAsDirReaderBuilder;
+import com.hedera.block.server.persistence.storage.read.BlockReader;
+import com.hedera.block.server.persistence.storage.write.BlockAsDirWriterBuilder;
+import com.hedera.block.server.persistence.storage.write.BlockWriter;
+import com.hedera.block.server.producer.ItemAckBuilder;
+import com.hedera.block.server.util.TestUtils;
+import io.grpc.stub.ServerCalls;
import io.grpc.stub.StreamObserver;
+import io.helidon.config.Config;
+import io.helidon.config.MapConfigSource;
+import io.helidon.config.spi.ConfigSource;
+import io.helidon.webserver.grpc.GrpcService;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+import java.util.Map;
import java.util.Optional;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
-class BlockStreamServiceTest {
+public class BlockStreamServiceTest {
- private final long TIMEOUT_THRESHOLD_MILLIS = 52L;
+ private final long TIMEOUT_THRESHOLD_MILLIS = 50L;
-    @Mock private StreamObserver<BlockStreamServiceGrpcProto.Block> responseObserver;
+    @Mock private StreamObserver<SingleBlockResponse> responseObserver;
- @Mock
-    private BlockPersistenceHandler<BlockStreamServiceGrpcProto.Block> blockPersistenceHandler;
+ @Mock private ItemAckBuilder itemAckBuilder;
- @Mock
- private StreamMediator<
- BlockStreamServiceGrpcProto.Block, BlockStreamServiceGrpcProto.BlockResponse>
- streamMediator;
+    @Mock private StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator;
+
+    @Mock private BlockReader<Block> blockReader;
+
+ @Mock private ServiceStatus serviceStatus;
+
+ private final System.Logger LOGGER = System.getLogger(getClass().getName());
+
+ private static final String TEMP_DIR = "block-node-unit-test-dir";
+ private static final String JUNIT = "my-junit-test";
+
+ private Path testPath;
+ private Config testConfig;
+
+ @BeforeEach
+ public void setUp() throws IOException {
+ testPath = Files.createTempDirectory(TEMP_DIR);
+ LOGGER.log(System.Logger.Level.INFO, "Created temp directory: " + testPath.toString());
+
+        Map<String, String> testProperties = Map.of(JUNIT, testPath.toString());
+ ConfigSource testConfigSource = MapConfigSource.builder().map(testProperties).build();
+ testConfig = Config.builder(testConfigSource).build();
+ }
+
+ @AfterEach
+ public void tearDown() {
+ TestUtils.deleteDirectory(testPath.toFile());
+ }
+
+ @Test
+ public void testServiceName() throws IOException, NoSuchAlgorithmException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Verify the service name
+ assertEquals(Constants.SERVICE_NAME, blockStreamService.serviceName());
+
+ // Verify other methods not invoked
+ verify(itemAckBuilder, never()).buildAck(any(BlockItem.class));
+ verify(streamMediator, never()).publish(any(BlockItem.class));
+ }
@Test
- void getBlockHappyPath() {
- BlockStreamServiceGrpcProto.Block block =
- BlockStreamServiceGrpcProto.Block.newBuilder().setId(1).build();
- BlockStreamService blockStreamService =
+ public void testProto() throws IOException, NoSuchAlgorithmException {
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
new BlockStreamService(
- TIMEOUT_THRESHOLD_MILLIS, streamMediator, blockPersistenceHandler);
- when(blockPersistenceHandler.read(1))
- .thenReturn(
- Optional.of(
- BlockStreamServiceGrpcProto.Block.newBuilder().setId(1).build()));
- blockStreamService.getBlock(block, responseObserver);
- verify(responseObserver, times(1)).onNext(block);
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+ Descriptors.FileDescriptor fileDescriptor = blockStreamService.proto();
+
+ // Verify the current rpc methods
+ assertEquals(3, fileDescriptor.getServices().getFirst().getMethods().size());
+
+ // Verify other methods not invoked
+ verify(itemAckBuilder, never()).buildAck(any(BlockItem.class));
+ verify(streamMediator, never()).publish(any(BlockItem.class));
}
@Test
- void getBlockErrorPath() {
- BlockStreamServiceGrpcProto.Block block =
- BlockStreamServiceGrpcProto.Block.newBuilder().setId(1).build();
- BlockStreamService blockStreamService =
+ void testSingleBlockHappyPath() throws IOException {
+
+        final BlockReader<Block> blockReader =
+ BlockAsDirReaderBuilder.newBuilder(JUNIT, testConfig).build();
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
new BlockStreamService(
- TIMEOUT_THRESHOLD_MILLIS, streamMediator, blockPersistenceHandler);
- when(blockPersistenceHandler.read(1)).thenReturn(Optional.empty());
- blockStreamService.getBlock(block, responseObserver);
- verify(responseObserver, times(1))
- .onNext(BlockStreamServiceGrpcProto.Block.newBuilder().setId(0).build());
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Enable the serviceStatus
+ when(serviceStatus.isRunning()).thenReturn(true);
+
+ // Generate and persist a block
+        final BlockWriter<BlockItem> blockWriter =
+ BlockAsDirWriterBuilder.newBuilder(JUNIT, testConfig, blockNodeContext).build();
+        final List<BlockItem> blockItems = generateBlockItems(1);
+ for (BlockItem blockItem : blockItems) {
+ blockWriter.write(blockItem);
+ }
+
+ // Get the block so we can verify the response payload
+        final Optional<Block> blockOpt = blockReader.read(1);
+ if (blockOpt.isEmpty()) {
+ fail("Block 1 should be present");
+ return;
+ }
+
+ // Build a response to verify what's passed to the response observer
+ final SingleBlockResponse expectedSingleBlockResponse =
+ SingleBlockResponse.newBuilder().setBlock(blockOpt.get()).build();
+
+ // Build a request to invoke the service
+ final SingleBlockRequest singleBlockRequest =
+ SingleBlockRequest.newBuilder().setBlockNumber(1).build();
+
+ // Call the service
+ blockStreamService.singleBlock(singleBlockRequest, responseObserver);
+ verify(responseObserver, times(1)).onNext(expectedSingleBlockResponse);
+ }
+
+ @Test
+ void testSingleBlockNotFoundPath() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+
+ // Simulate the block not being found by the reader
+ when(blockReader.read(1)).thenReturn(Optional.empty());
+
+ // Build a response to verify what's passed to the response observer
+ final SingleBlockResponse expectedNotFound = buildSingleBlockNotFoundResponse();
+
+ // Build a request to invoke the service
+ final SingleBlockRequest singleBlockRequest =
+ SingleBlockRequest.newBuilder().setBlockNumber(1).build();
+
+ // Call the service
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Enable the serviceStatus
+ when(serviceStatus.isRunning()).thenReturn(true);
+
+ blockStreamService.singleBlock(singleBlockRequest, responseObserver);
+ verify(responseObserver, times(1)).onNext(expectedNotFound);
+ }
+
+ @Test
+ void testSingleBlockServiceNotAvailable() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Set the service status to not running
+ when(serviceStatus.isRunning()).thenReturn(false);
+
+ final SingleBlockResponse expectedNotAvailable = buildSingleBlockNotAvailableResponse();
+
+ // Build a request to invoke the service
+ final SingleBlockRequest singleBlockRequest =
+ SingleBlockRequest.newBuilder().setBlockNumber(1).build();
+ blockStreamService.singleBlock(singleBlockRequest, responseObserver);
+ verify(responseObserver, times(1)).onNext(expectedNotAvailable);
+ }
+
+ @Test
+ public void testSingleBlockIOExceptionPath() throws IOException {
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ // Set the service status to running and force the reader to fail
+ when(serviceStatus.isRunning()).thenReturn(true);
+ when(blockReader.read(1)).thenThrow(new IOException("Test exception"));
+
+ final SingleBlockResponse expectedNotAvailable = buildSingleBlockNotAvailableResponse();
+
+ // Build a request to invoke the service
+ final SingleBlockRequest singleBlockRequest =
+ SingleBlockRequest.newBuilder().setBlockNumber(1).build();
+ blockStreamService.singleBlock(singleBlockRequest, responseObserver);
+ verify(responseObserver, times(1)).onNext(expectedNotAvailable);
+ }
+
+ @Test
+ public void testUpdateInvokesRoutingWithLambdas() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final BlockStreamService blockStreamService =
+ new BlockStreamService(
+ TIMEOUT_THRESHOLD_MILLIS,
+ itemAckBuilder,
+ streamMediator,
+ blockReader,
+ serviceStatus,
+ blockNodeContext);
+
+ GrpcService.Routing routing = mock(GrpcService.Routing.class);
+ blockStreamService.update(routing);
+
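+ // Each of the three RPCs should be registered with its matching routing type:
+ // bidi streaming for publish, server streaming for subscribe, and unary for singleBlock.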
+ verify(routing, timeout(50).times(1))
+ .bidi(eq(CLIENT_STREAMING_METHOD_NAME), any(ServerCalls.BidiStreamingMethod.class));
+ verify(routing, timeout(50).times(1))
+ .serverStream(
+ eq(SERVER_STREAMING_METHOD_NAME),
+ any(ServerCalls.ServerStreamingMethod.class));
+ verify(routing, timeout(50).times(1))
+ .unary(eq(SINGLE_BLOCK_METHOD_NAME), any(ServerCalls.UnaryMethod.class));
}
}
diff --git a/server/src/test/java/com/hedera/block/server/consumer/ConsumerStreamResponseObserverTest.java b/server/src/test/java/com/hedera/block/server/consumer/ConsumerStreamResponseObserverTest.java
new file mode 100644
index 000000000..bce43f3ea
--- /dev/null
+++ b/server/src/test/java/com/hedera/block/server/consumer/ConsumerStreamResponseObserverTest.java
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.block.server.consumer;
+
+import static com.hedera.block.protos.BlockStreamService.*;
+import static com.hedera.block.server.util.PersistTestUtils.generateBlockItems;
+import static org.mockito.Mockito.*;
+
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.mediator.StreamMediator;
+import io.grpc.stub.ServerCallStreamObserver;
+import io.grpc.stub.StreamObserver;
+import java.time.InstantSource;
+import java.util.List;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+public class ConsumerStreamResponseObserverTest {
+
+ private final long TIMEOUT_THRESHOLD_MILLIS = 50L;
+ private final long TEST_TIME = 1_719_427_664_950L;
+
+ @Mock private StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> streamMediator;
+ @Mock private StreamObserver<SubscribeStreamResponse> responseStreamObserver;
+ @Mock private ObjectEvent<SubscribeStreamResponse> objectEvent;
+
+ @Mock private ServerCallStreamObserver<SubscribeStreamResponse> serverCallStreamObserver;
+ @Mock private InstantSource testClock;
+
+ @Test
+ public void testProducerTimeoutWithinWindow() {
+
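+ // Mock a clock with 2 different return values in response to anticipated
+ // millis() calls. Here the second call will always be inside the timeout window.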
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS);
+
+ final var consumerBlockItemObserver =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ responseStreamObserver);
+
+ final BlockHeader blockHeader = BlockHeader.newBuilder().setBlockNumber(1).build();
+ final BlockItem blockItem = BlockItem.newBuilder().setHeader(blockHeader).build();
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
+
+ when(objectEvent.get()).thenReturn(subscribeStreamResponse);
+
+ consumerBlockItemObserver.onEvent(objectEvent, 0, true);
+
+ // verify the observer is called with the next BlockItem
+ verify(responseStreamObserver).onNext(subscribeStreamResponse);
+
+ // verify the mediator is NOT called to unsubscribe the observer
+ verify(streamMediator, never()).unsubscribe(consumerBlockItemObserver);
+ }
+
+ @Test
+ public void testProducerTimeoutOutsideWindow() throws InterruptedException {
+
+ // Mock a clock with 2 different return values in response to anticipated
+ // millis() calls. Here the second call will always be outside the timeout window.
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS + 1);
+
+ final var consumerBlockItemObserver =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ responseStreamObserver);
+
+ consumerBlockItemObserver.onEvent(objectEvent, 0, true);
+ verify(streamMediator).unsubscribe(consumerBlockItemObserver);
+ }
+
+ @Test
+ public void testHandlersSetOnObserver() throws InterruptedException {
+
+ // Mock a clock with 2 different return values in response to anticipated
+ // millis() calls. Here the second call will always be inside the timeout window.
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS);
+
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, serverCallStreamObserver);
+
+ verify(serverCallStreamObserver, timeout(50).times(1)).setOnCloseHandler(any());
+ verify(serverCallStreamObserver, timeout(50).times(1)).setOnCancelHandler(any());
+ }
+
+ @Test
+ public void testResponseNotPermittedAfterCancel() {
+
+ final TestConsumerStreamResponseObserver consumerStreamResponseObserver =
+ new TestConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ serverCallStreamObserver);
+
+ final List<BlockItem> blockItems = generateBlockItems(1);
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItems.getFirst()).build();
+ when(objectEvent.get()).thenReturn(subscribeStreamResponse);
+
+ // Confirm that the observer is called with the first BlockItem
+ consumerStreamResponseObserver.onEvent(objectEvent, 0, true);
+
+ // Cancel the observer
+ consumerStreamResponseObserver.cancel();
+
+ // Attempt to send another BlockItem
+ consumerStreamResponseObserver.onEvent(objectEvent, 0, true);
+
+ // Confirm that canceling the observer allowed only 1 response to be sent.
+ verify(serverCallStreamObserver, timeout(50).times(1)).onNext(subscribeStreamResponse);
+ }
+
+ @Test
+ public void testResponseNotPermittedAfterClose() {
+
+ final TestConsumerStreamResponseObserver consumerStreamResponseObserver =
+ new TestConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ serverCallStreamObserver);
+
+ final List<BlockItem> blockItems = generateBlockItems(1);
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItems.getFirst()).build();
+ when(objectEvent.get()).thenReturn(subscribeStreamResponse);
+
+ // Confirm that the observer is called with the first BlockItem
+ consumerStreamResponseObserver.onEvent(objectEvent, 0, true);
+
+ // Close the observer
+ consumerStreamResponseObserver.close();
+
+ // Attempt to send another BlockItem
+ consumerStreamResponseObserver.onEvent(objectEvent, 0, true);
+
+ // Confirm that closing the observer allowed only 1 response to be sent.
+ verify(serverCallStreamObserver, timeout(50).times(1)).onNext(subscribeStreamResponse);
+ }
+
+ @Test
+ public void testConsumerNotToSendBeforeBlockHeader() {
+
+ // Mock a clock with 2 different return values in response to anticipated
+ // millis() calls. Here the second call will always be inside the timeout window.
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS);
+
+ final var consumerBlockItemObserver =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ responseStreamObserver);
+
+ // Send non-header BlockItems to validate that the observer does not send them
+ for (int i = 1; i <= 10; i++) {
+
+ if (i % 2 == 0) {
+ final EventMetadata eventMetadata =
+ EventMetadata.newBuilder().setCreatorId(i).build();
+ final BlockItem blockItem =
+ BlockItem.newBuilder().setStartEvent(eventMetadata).build();
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
+ when(objectEvent.get()).thenReturn(subscribeStreamResponse);
+ } else {
+ final BlockProof blockProof = BlockProof.newBuilder().setBlock(i).build();
+ final BlockItem blockItem =
+ BlockItem.newBuilder().setStateProof(blockProof).build();
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
+ when(objectEvent.get()).thenReturn(subscribeStreamResponse);
+ }
+
+ consumerBlockItemObserver.onEvent(objectEvent, 0, true);
+ }
+
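+ // The empty BlockItem below only builds a response instance for the verify,
+ // which expects zero onNext calls.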
+ final BlockItem blockItem = BlockItem.newBuilder().build();
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
+
+ // Confirm that the observer was never called with a BlockItem
+ // since we never sent a BlockItem with a BlockHeader to start the stream.
+ verify(responseStreamObserver, timeout(50).times(0)).onNext(subscribeStreamResponse);
+ }
+
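+ // Test-only subclass exposing the protected onCancel/onClose Runnables so the
+ // tests above can simulate the client cancelling or closing the stream.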
+ private static class TestConsumerStreamResponseObserver extends ConsumerStreamResponseObserver {
+
+ public TestConsumerStreamResponseObserver(
+ long timeoutThresholdMillis,
+ InstantSource producerLivenessClock,
+ StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>> subscriptionHandler,
+ StreamObserver<SubscribeStreamResponse> subscribeStreamResponseObserver) {
+ super(
+ timeoutThresholdMillis,
+ producerLivenessClock,
+ subscriptionHandler,
+ subscribeStreamResponseObserver);
+ }
+
+ public void cancel() {
+ onCancel.run();
+ }
+
+ public void close() {
+ onClose.run();
+ }
+ }
+}
diff --git a/server/src/test/java/com/hedera/block/server/consumer/LiveStreamObserverImplTest.java b/server/src/test/java/com/hedera/block/server/consumer/LiveStreamObserverImplTest.java
deleted file mode 100644
index 99af21cf5..000000000
--- a/server/src/test/java/com/hedera/block/server/consumer/LiveStreamObserverImplTest.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.consumer;
-
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.mediator.StreamMediator;
-import io.grpc.stub.StreamObserver;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.Mock;
-import org.mockito.junit.jupiter.MockitoExtension;
-
-import java.time.Clock;
-import java.time.Instant;
-import java.time.InstantSource;
-import java.time.ZoneId;
-
-import static org.mockito.Mockito.*;
-
-@ExtendWith(MockitoExtension.class)
-public class LiveStreamObserverImplTest {
-
- private final long TIMEOUT_THRESHOLD_MILLIS = 50L;
- private final long TEST_TIME = 1_719_427_664_950L;
-
- @Mock
- private StreamMediator streamMediator;
-
- @Mock
- private StreamObserver responseStreamObserver;
-
-
- @Test
- public void testConsumerTimeoutWithinWindow() {
- final LiveStreamObserver liveStreamObserver = new LiveStreamObserverImpl(
- TIMEOUT_THRESHOLD_MILLIS,
- buildClockInsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- buildClockInsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- streamMediator,
- responseStreamObserver);
- BlockStreamServiceGrpcProto.Block newBlock = BlockStreamServiceGrpcProto.Block.newBuilder().build();
- liveStreamObserver.notify(newBlock);
-
- // verify the observer is called with the next
- // block and the stream mediator is not unsubscribed
- verify(responseStreamObserver).onNext(newBlock);
- verify(streamMediator, never()).unsubscribe(liveStreamObserver);
- }
-
- @Test
- public void testConsumerTimeoutOutsideWindow() throws InterruptedException {
-
- final LiveStreamObserver liveStreamObserver = new LiveStreamObserverImpl(
- TIMEOUT_THRESHOLD_MILLIS,
- buildClockOutsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- buildClockOutsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- streamMediator,
- responseStreamObserver);
-
- final BlockStreamServiceGrpcProto.Block newBlock = BlockStreamServiceGrpcProto.Block.newBuilder().build();
- when(streamMediator.isSubscribed(liveStreamObserver)).thenReturn(true);
- liveStreamObserver.notify(newBlock);
- verify(streamMediator).unsubscribe(liveStreamObserver);
- }
-
- @Test
- public void testProducerTimeoutWithinWindow() {
- final LiveStreamObserver liveStreamObserver = new LiveStreamObserverImpl(
- TIMEOUT_THRESHOLD_MILLIS,
- buildClockInsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- buildClockInsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- streamMediator,
- responseStreamObserver);
-
- BlockStreamServiceGrpcProto.BlockResponse blockResponse = BlockStreamServiceGrpcProto.BlockResponse.newBuilder().build();
- liveStreamObserver.onNext(blockResponse);
-
- // verify the mediator is NOT called to unsubscribe the observer
- verify(streamMediator, never()).unsubscribe(liveStreamObserver);
- }
-
- @Test
- public void testProducerTimeoutOutsideWindow() throws InterruptedException {
- final LiveStreamObserver liveStreamObserver = new LiveStreamObserverImpl(
- TIMEOUT_THRESHOLD_MILLIS,
- buildClockOutsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- buildClockOutsideWindow(TEST_TIME, TIMEOUT_THRESHOLD_MILLIS),
- streamMediator,
- responseStreamObserver);
-
- Thread.sleep(51);
- BlockStreamServiceGrpcProto.BlockResponse blockResponse = BlockStreamServiceGrpcProto.BlockResponse.newBuilder().build();
- liveStreamObserver.onNext(blockResponse);
-
- verify(streamMediator).unsubscribe(liveStreamObserver);
- }
-
- private static InstantSource buildClockInsideWindow(long testTime, long timeoutThresholdMillis) {
- return new TestClock(testTime, testTime + timeoutThresholdMillis - 1);
- }
-
- private static InstantSource buildClockOutsideWindow(long testTime, long timeoutThresholdMillis) {
- return new TestClock(testTime, testTime + timeoutThresholdMillis + 1);
- }
-
- static class TestClock implements InstantSource {
-
- private int index;
- private final Long[] millis;
-
- TestClock(Long... millis) {
- this.millis = millis;
- }
-
- @Override
- public long millis() {
- long value = millis[index];
-
- // cycle through the provided millis
- // and wrap around if necessary
- index = index > millis.length - 1 ? 0 : index + 1;
- return value;
- }
-
- @Override
- public Instant instant() {
- return null;
- }
- }
-}
diff --git a/server/src/test/java/com/hedera/block/server/mediator/LiveStreamMediatorImplTest.java b/server/src/test/java/com/hedera/block/server/mediator/LiveStreamMediatorImplTest.java
index c467bb919..0227d6b4e 100644
--- a/server/src/test/java/com/hedera/block/server/mediator/LiveStreamMediatorImplTest.java
+++ b/server/src/test/java/com/hedera/block/server/mediator/LiveStreamMediatorImplTest.java
@@ -16,103 +16,367 @@
package com.hedera.block.server.mediator;
+import static com.hedera.block.protos.BlockStreamService.*;
+import static com.hedera.block.server.util.PersistTestUtils.generateBlockItems;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.Mockito.*;
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.consumer.LiveStreamObserver;
-import com.hedera.block.server.persistence.WriteThroughCacheHandler;
-import com.hedera.block.server.persistence.storage.BlockStorage;
+import com.hedera.block.server.ServiceStatusImpl;
+import com.hedera.block.server.config.BlockNodeContext;
+import com.hedera.block.server.config.BlockNodeContextFactory;
+import com.hedera.block.server.consumer.ConsumerStreamResponseObserver;
+import com.hedera.block.server.data.ObjectEvent;
+import com.hedera.block.server.persistence.storage.write.BlockWriter;
+import com.lmax.disruptor.EventHandler;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import io.grpc.stub.ServerCallStreamObserver;
+import io.grpc.stub.StreamObserver;
+import java.io.IOException;
+import java.time.InstantSource;
+import java.util.List;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mockito.verify;
-
@ExtendWith(MockitoExtension.class)
public class LiveStreamMediatorImplTest {
- @Mock
- private LiveStreamObserver liveStreamObserver1;
+ @Mock private EventHandler<ObjectEvent<SubscribeStreamResponse>> observer1;
+ @Mock private EventHandler<ObjectEvent<SubscribeStreamResponse>> observer2;
+ @Mock private EventHandler<ObjectEvent<SubscribeStreamResponse>> observer3;
+
+ @Mock private BlockWriter<BlockItem> blockWriter;
+
+ @Mock private StreamObserver<SubscribeStreamResponse> streamObserver1;
+ @Mock private StreamObserver<SubscribeStreamResponse> streamObserver2;
+ @Mock private StreamObserver<SubscribeStreamResponse> streamObserver3;
- @Mock
- private LiveStreamObserver liveStreamObserver2;
+ @Mock private ServerCallStreamObserver<SubscribeStreamResponse> serverCallStreamObserver;
+ @Mock private InstantSource testClock;
- @Mock
- private LiveStreamObserver liveStreamObserver3;
+ private final long TIMEOUT_THRESHOLD_MILLIS = 100L;
+ private final long TEST_TIME = 1_719_427_664_950L;
- @Mock
- private BlockStorage blockStorage;
+ private static final int testTimeout = 200;
@Test
- public void testUnsubscribeEach() {
+ public void testUnsubscribeEach() throws InterruptedException, IOException {
- final StreamMediator streamMediator =
- new LiveStreamMediatorImpl(new WriteThroughCacheHandler(blockStorage));
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediatorBuilder =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl());
+ final var streamMediator = streamMediatorBuilder.build();
// Set up the subscribers
- streamMediator.subscribe(liveStreamObserver1);
- streamMediator.subscribe(liveStreamObserver2);
- streamMediator.subscribe(liveStreamObserver3);
+ streamMediator.subscribe(observer1);
+ streamMediator.subscribe(observer2);
+ streamMediator.subscribe(observer3);
+
+ assertTrue(
+ streamMediator.isSubscribed(observer1),
+ "Expected the mediator to have observer1 subscribed");
+ assertTrue(
+ streamMediator.isSubscribed(observer2),
+ "Expected the mediator to have observer2 subscribed");
+ assertTrue(
+ streamMediator.isSubscribed(observer3),
+ "Expected the mediator to have observer3 subscribed");
+
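+ // Brief pause before unsubscribing; presumably this gives the mediator's async
+ // event handlers time to start.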
+ Thread.sleep(testTimeout);
- assertTrue(streamMediator.isSubscribed(liveStreamObserver1), "Expected the mediator to have liveStreamObserver1 subscribed");
- assertTrue(streamMediator.isSubscribed(liveStreamObserver2), "Expected the mediator to have liveStreamObserver2 subscribed");
- assertTrue(streamMediator.isSubscribed(liveStreamObserver3), "Expected the mediator to have liveStreamObserver3 subscribed");
+ streamMediator.unsubscribe(observer1);
+ assertFalse(
+ streamMediator.isSubscribed(observer1),
+ "Expected the mediator to have unsubscribed observer1");
- streamMediator.unsubscribe(liveStreamObserver1);
- assertFalse(streamMediator.isSubscribed(liveStreamObserver1), "Expected the mediator to have unsubscribed liveStreamObserver1");
+ streamMediator.unsubscribe(observer2);
+ assertFalse(
+ streamMediator.isSubscribed(observer2),
+ "Expected the mediator to have unsubscribed observer2");
- streamMediator.unsubscribe(liveStreamObserver2);
- assertFalse(streamMediator.isSubscribed(liveStreamObserver2), "Expected the mediator to have unsubscribed liveStreamObserver2");
+ streamMediator.unsubscribe(observer3);
+ assertFalse(
+ streamMediator.isSubscribed(observer3),
+ "Expected the mediator to have unsubscribed observer3");
- streamMediator.unsubscribe(liveStreamObserver3);
- assertFalse(streamMediator.isSubscribed(liveStreamObserver3), "Expected the mediator to have unsubscribed liveStreamObserver3");
+ // Confirm the counter was never incremented
+ assertEquals(0, blockNodeContext.metricsService().liveBlockItems.get());
}
@Test
- public void testMediatorPersistenceWithoutSubscribers() {
+ public void testMediatorPersistenceWithoutSubscribers() throws IOException {
- final StreamMediator streamMediator =
- new LiveStreamMediatorImpl(new WriteThroughCacheHandler(blockStorage));
-
- final BlockStreamServiceGrpcProto.Block newBlock = BlockStreamServiceGrpcProto.Block.newBuilder().build();
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+ final BlockItem blockItem = BlockItem.newBuilder().build();
// Acting as a producer, notify the mediator of a new block
- streamMediator.notifyAll(newBlock);
+ streamMediator.publish(blockItem);
+
+ // Verify the counter was incremented
+ assertEquals(1, blockNodeContext.metricsService().liveBlockItems.get());
- // Confirm the block was persisted to storage
- // even though there are no subscribers
- verify(blockStorage).write(newBlock);
+ // Confirm the BlockWriter write method was
+ // called despite the absence of subscribers
+ verify(blockWriter, timeout(testTimeout).times(1)).write(blockItem);
}
@Test
- public void testMediatorNotifyAll() {
+ public void testMediatorPublishEventToSubscribers() throws IOException, InterruptedException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS);
- final StreamMediator streamMediator =
- new LiveStreamMediatorImpl(new WriteThroughCacheHandler(blockStorage));
+ final var concreteObserver1 =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, streamObserver1);
+
+ final var concreteObserver2 =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, streamObserver2);
+
+ final var concreteObserver3 =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, streamObserver3);
// Set up the subscribers
- streamMediator.subscribe(liveStreamObserver1);
- streamMediator.subscribe(liveStreamObserver2);
- streamMediator.subscribe(liveStreamObserver3);
+ streamMediator.subscribe(concreteObserver1);
+ streamMediator.subscribe(concreteObserver2);
+ streamMediator.subscribe(concreteObserver3);
- assertTrue(streamMediator.isSubscribed(liveStreamObserver1), "Expected the mediator to have liveStreamObserver1 subscribed");
- assertTrue(streamMediator.isSubscribed(liveStreamObserver2), "Expected the mediator to have liveStreamObserver2 subscribed");
- assertTrue(streamMediator.isSubscribed(liveStreamObserver3), "Expected the mediator to have liveStreamObserver3 subscribed");
+ assertTrue(
+ streamMediator.isSubscribed(concreteObserver1),
+ "Expected the mediator to have observer1 subscribed");
+ assertTrue(
+ streamMediator.isSubscribed(concreteObserver2),
+ "Expected the mediator to have observer2 subscribed");
+ assertTrue(
+ streamMediator.isSubscribed(concreteObserver3),
+ "Expected the mediator to have observer3 subscribed");
- final BlockStreamServiceGrpcProto.Block newBlock = BlockStreamServiceGrpcProto.Block.newBuilder().build();
+ final BlockHeader blockHeader = BlockHeader.newBuilder().setBlockNumber(1).build();
+ final BlockItem blockItem = BlockItem.newBuilder().setHeader(blockHeader).build();
+ final SubscribeStreamResponse subscribeStreamResponse =
+ SubscribeStreamResponse.newBuilder().setBlockItem(blockItem).build();
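+ // The mediator is expected to wrap the published BlockItem in this
+ // SubscribeStreamResponse when fanning it out to the subscribers verified below.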
// Acting as a producer, notify the mediator of a new block
- streamMediator.notifyAll(newBlock);
+ streamMediator.publish(blockItem);
+
+ assertEquals(1, blockNodeContext.metricsService().liveBlockItems.get());
// Confirm each subscriber was notified of the new block
- verify(liveStreamObserver1).notify(newBlock);
- verify(liveStreamObserver2).notify(newBlock);
- verify(liveStreamObserver3).notify(newBlock);
+ verify(streamObserver1, timeout(testTimeout).times(1)).onNext(subscribeStreamResponse);
+ verify(streamObserver2, timeout(testTimeout).times(1)).onNext(subscribeStreamResponse);
+ verify(streamObserver3, timeout(testTimeout).times(1)).onNext(subscribeStreamResponse);
+
+ // Confirm the BlockWriter write method was called
+ verify(blockWriter).write(blockItem);
+ }
+
+ @Test
+ public void testSubAndUnsubHandling() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS);
+
+ final var concreteObserver1 =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, streamObserver1);
+
+ final var concreteObserver2 =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, streamObserver2);
+
+ final var concreteObserver3 =
+ new ConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS, testClock, streamMediator, streamObserver3);
+
+ // Set up the subscribers
+ streamMediator.subscribe(concreteObserver1);
+ streamMediator.subscribe(concreteObserver2);
+ streamMediator.subscribe(concreteObserver3);
+
+ streamMediator.unsubscribe(concreteObserver1);
+ streamMediator.unsubscribe(concreteObserver2);
+ streamMediator.unsubscribe(concreteObserver3);
+
+ // Confirm the counter was never incremented
+ assertEquals(0, blockNodeContext.metricsService().liveBlockItems.get());
+ }
+
+ @Test
+ public void testOnCancelSubscriptionHandling() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS);
+
+ final var testConsumerBlockItemObserver =
+ new TestConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ serverCallStreamObserver);
+
+ streamMediator.subscribe(testConsumerBlockItemObserver);
+ assertTrue(streamMediator.isSubscribed(testConsumerBlockItemObserver));
+
+ // Simulate the producer notifying the mediator of a new block
+ final List<BlockItem> blockItems = generateBlockItems(1);
+ streamMediator.publish(blockItems.getFirst());
- // Confirm the block was persisted to storage and cache
- verify(blockStorage).write(newBlock);
+ // Simulate the consumer cancelling the stream
+ testConsumerBlockItemObserver.getOnCancel().run();
+
+ // Verify the block item incremented the counter
+ assertEquals(1, blockNodeContext.metricsService().liveBlockItems.get());
+
+ // Verify the onCancel handler was registered on the consumer's observer
+ verify(serverCallStreamObserver, timeout(testTimeout).times(1)).setOnCancelHandler(any());
+
+ // Confirm the mediator unsubscribed the consumer
+ assertFalse(streamMediator.isSubscribed(testConsumerBlockItemObserver));
}
+ @Test
+ public void testOnCloseSubscriptionHandling() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+
+ // testClock configured to be outside the timeout window
+ when(testClock.millis()).thenReturn(TEST_TIME, TEST_TIME + TIMEOUT_THRESHOLD_MILLIS + 1);
+
+ final var testConsumerBlockItemObserver =
+ new TestConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ serverCallStreamObserver);
+
+ streamMediator.subscribe(testConsumerBlockItemObserver);
+ assertTrue(streamMediator.isSubscribed(testConsumerBlockItemObserver));
+
+ // Simulate the producer notifying the mediator of a new block
+ final List<BlockItem> blockItems = generateBlockItems(1);
+ streamMediator.publish(blockItems.getFirst());
+
+ // Simulate the consumer completing the stream
+ testConsumerBlockItemObserver.getOnClose().run();
+
+ // Verify the block item incremented the counter
+ assertEquals(1, blockNodeContext.metricsService().liveBlockItems.get());
+
+ // Verify the onCancel handler was registered on the consumer's observer
+ verify(serverCallStreamObserver, timeout(testTimeout).times(1)).setOnCancelHandler(any());
+
+ // Confirm the mediator unsubscribed the consumer
+ assertFalse(streamMediator.isSubscribed(testConsumerBlockItemObserver));
+ }
+
+ @Test
+ public void testMediatorBlocksPublishAfterException() throws IOException, InterruptedException {
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+
+ final List<BlockItem> blockItems = generateBlockItems(1);
+ final BlockItem firstBlockItem = blockItems.getFirst();
+
+ // Right now, only a single producer calls publishEvent. In
+ // that case, they will get an IOException bubbled up to them.
+ // However, we will need to support multiple producers in the
+ // future. In that case, we need to make sure a second producer
+ // is not able to publish a block after the first producer fails.
+ doThrow(new IOException()).when(blockWriter).write(firstBlockItem);
+ try {
+ streamMediator.publish(firstBlockItem);
+ fail("Expected an IOException to be thrown");
+ } catch (IOException e) {
+
+ final BlockItem secondBlockItem = blockItems.get(1);
+ streamMediator.publish(secondBlockItem);
+
+ // Confirm the counter was incremented only once
+ assertEquals(1, blockNodeContext.metricsService().liveBlockItems.get());
+
+ // Confirm the BlockWriter write method was only called
+ // once despite the second block being published.
+ verify(blockWriter, timeout(testTimeout).times(1)).write(firstBlockItem);
+ }
+ }
+
+ @Test
+ public void testUnsubscribeWhenNotSubscribed() throws IOException {
+
+ final BlockNodeContext blockNodeContext = BlockNodeContextFactory.create();
+ final var streamMediator =
+ LiveStreamMediatorBuilder.newBuilder(
+ blockWriter, blockNodeContext, new ServiceStatusImpl())
+ .build();
+ final var testConsumerBlockItemObserver =
+ new TestConsumerStreamResponseObserver(
+ TIMEOUT_THRESHOLD_MILLIS,
+ testClock,
+ streamMediator,
+ serverCallStreamObserver);
+
+ // Confirm the observer is not subscribed
+ assertFalse(streamMediator.isSubscribed(testConsumerBlockItemObserver));
+
+ // Attempt to unsubscribe the observer
+ streamMediator.unsubscribe(testConsumerBlockItemObserver);
+
+ // Confirm the observer is still not subscribed
+ assertFalse(streamMediator.isSubscribed(testConsumerBlockItemObserver));
+ }
+
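+ // Test-only subclass exposing the protected onCancel/onClose Runnables so the
+ // mediator tests can drive the consumer's cancel and close handlers directly.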
+ private static class TestConsumerStreamResponseObserver extends ConsumerStreamResponseObserver {
+ public TestConsumerStreamResponseObserver(
+ long timeoutThresholdMillis,
+ final InstantSource producerLivenessClock,
+ final StreamMediator<BlockItem, ObjectEvent<SubscribeStreamResponse>>
+ streamMediator,
+ final StreamObserver<SubscribeStreamResponse> responseStreamObserver) {
+ super(
+ timeoutThresholdMillis,
+ producerLivenessClock,
+ streamMediator,
+ responseStreamObserver);
+ }
+
+ @NonNull
+ public Runnable getOnCancel() {
+ return onCancel;
+ }
+
+ @NonNull
+ public Runnable getOnClose() {
+ return onClose;
+ }
+ }
}
diff --git a/server/src/test/java/com/hedera/block/server/persistence/RangeTest.java b/server/src/test/java/com/hedera/block/server/persistence/RangeTest.java
deleted file mode 100644
index ef6539a07..000000000
--- a/server/src/test/java/com/hedera/block/server/persistence/RangeTest.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2024 Hedera Hashgraph, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.hedera.block.server.persistence;
-
-import com.hedera.block.protos.BlockStreamServiceGrpcProto;
-import com.hedera.block.server.persistence.storage.BlockStorage;
-import org.junit.jupiter.api.Test;
-
-import java.util.*;
-
-import static com.hedera.block.server.persistence.PersistTestUtils.generateBlocks;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-public class RangeTest {
-
- @Test
- public void testReadRangeWithEvenEntries() {
-
- int maxEntries = 100;
- int numOfBlocks = 100;
-
- BlockPersistenceHandler blockPersistenceHandler = generateInMemoryTestBlockPersistenceHandler(maxEntries);
- List blocks = generateBlocks(numOfBlocks);
- for (BlockStreamServiceGrpcProto.Block block : blocks) {
- blockPersistenceHandler.persist(block);
- }
-
- int window = 10;
- int numOfWindows = numOfBlocks / window;
-
- verifyReadRange(window, numOfWindows, blockPersistenceHandler);
- }
-
- @Test
- public void testReadRangeWithNoBlocks() {
- int maxEntries = 100;
-
- BlockPersistenceHandler blockPersistenceHandler = generateInMemoryTestBlockPersistenceHandler(maxEntries);
- Queue results = blockPersistenceHandler.readRange(1, 100);
- assertNotNull(results);
- assertEquals(0, results.size());
- }
-
- @Test
- public void testReadRangeWhenBlocksLessThanWindow() {
- int maxEntries = 100;
- int numOfBlocks = 9;
-
- BlockPersistenceHandler blockPersistenceHandler = generateInMemoryTestBlockPersistenceHandler(maxEntries);
- List blocks = generateBlocks(numOfBlocks);
- for (BlockStreamServiceGrpcProto.Block block : blocks) {
- blockPersistenceHandler.persist(block);
- }
-
- int window = 10;
-
- Queue results = blockPersistenceHandler.readRange(1, window);
- assertNotNull(results);
- assertEquals(numOfBlocks, results.size());
- }
-
- private static void verifyReadRange(
- int window,
- int numOfWindows,
- BlockPersistenceHandler