diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d4fde6c4cc..7d82f720c2 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -312,22 +312,22 @@ jobs: rm ./*.deb # Reference Contracts - - name: checkout reference-contracts - uses: actions/checkout@v4 - with: - repository: AntelopeIO/reference-contracts - path: reference-contracts - ref: '${{needs.v.outputs.reference-contracts-ref}}' - - if: ${{ matrix.test == 'deb-install' }} - name: Install reference-contracts deps - run: | - apt-get -y install cmake build-essential - - name: Build & Test reference-contracts - run: | - cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -DSYSTEM_ENABLE_LEAP_VERSION_CHECK=Off -DSYSTEM_ENABLE_CDT_VERSION_CHECK=Off - cmake --build reference-contracts/build -- -j $(nproc) - cd reference-contracts/build/tests - ctest --output-on-failure -j $(nproc) +# - name: checkout reference-contracts +# uses: actions/checkout@v4 +# with: +# repository: AntelopeIO/reference-contracts +# path: reference-contracts +# ref: '${{needs.v.outputs.reference-contracts-ref}}' +# - if: ${{ matrix.test == 'deb-install' }} +# name: Install reference-contracts deps +# run: | +# apt-get -y install cmake build-essential +# - name: Build & Test reference-contracts +# run: | +# cmake -S reference-contracts -B reference-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -DSYSTEM_ENABLE_LEAP_VERSION_CHECK=Off -DSYSTEM_ENABLE_CDT_VERSION_CHECK=Off +# cmake --build reference-contracts/build -- -j $(nproc) +# cd reference-contracts/build/tests +# ctest --output-on-failure -j $(nproc) all-passing: name: All Required Tests Passed diff --git a/docs/block_production/lifecycle.md b/docs/block_production/lifecycle.md new file mode 100644 index 0000000000..cba10965f0 --- /dev/null +++ b/docs/block_production/lifecycle.md @@ -0,0 +1,27 @@ +The following diagram describes Leap block production, as implemented in `libraries/chain/controller.cpp`: + +```mermaid +flowchart TD + pp[producer_plugin] --> D + A("replay()"):::fun --> B("replay_push_block()"):::fun + B --> E("maybe_switch_forks()"):::fun + C("init()"):::fun ---> E + C --> A + D("push_block()"):::fun ---> E + subgraph G["apply_block()"] + direction TB + start -- "stage = Ø" --> sb + sb("start_block()"):::fun -- "stage = building_block" --> et + et["execute transactions" ] -- "stage = building_block" --> fb("finalize_block()"):::fun + fb -- "stage = assembled block" --> cb["add transaction metadata and create completed block"] + cb -- "stage = completed block" --> commit("commit_block() (where we [maybe] add to fork_db and mark valid)"):::fun + + end + B ----> start + E --> G + D --> F("log_irreversible()"):::fun + commit -- "stage = Ø" --> F + F -- "if in irreversible mode" --> G + + classDef fun fill:#f96 +``` \ No newline at end of file diff --git a/hostuff-pseudo.txt b/hostuff-pseudo.txt deleted file mode 100644 index 997719a74f..0000000000 --- a/hostuff-pseudo.txt +++ /dev/null @@ -1,663 +0,0 @@ -/* - - Antelope + Hotstuff = Roasted Antelope - - Roasted Antelope is a proposal for an upgrade to the Antelope consensus model, based on the Hotstuff protocol. This document defines extended pseudocode for this upgrade, and should be relatively straightforward to plug into the existing Antelope codebase. - - Notes: This pseudocode is based on algorithms 4 (safety) & 5 (liveness) of the "HotStuff: BFT Consensus in the Lens of Blockchain" paper. 
- - There are a few minor modifications to the pacemaker algorithm implementation, allowing to decompose the role of block producer into the 3 sub-roles of block proposer, block finalizer and view leader. - - This pseudocode handles each role separately. A single entity may play multiple roles. - - This pseudocode also covers changes to the finalizer set, which include transition from and into dual_set mode. - - Under dual_set mode, the incumbent and the incoming finalizer sets are jointly confirming views. - - As is the case with the algorithm 4, the notion of view is almost completely decoupled from the safety protocol, and is aligned to the liveness protocol instead. - -*/ - -// Data structures - -//evolved from producer_schedule -struct schedule(){ - - //currently, block_proposers, block_finalizers and view_leaders sets are block producers. A future upgrade can further define the selection process for each of these roles, and result in distinct sets of variable size without compromising the protocol's safety - - block_proposers = [...]; - - block_finalizers = [...] //current / incumbent block finalizers set - incoming_block_finalizers = [...]; //incoming block finalizers set, null if operating in single_set mode - - view_leaders = [...]; - - current_leader //defined by pacemaker, abstracted; - current_proposer //defined by pacemaker, abstracted; - - get_proposer(){return current_proposer} ; - get_leader(){return current_leader} ; - - //returns a list of incumbent finalizers - get_finalizers(){return block_finalizers} ; - - //returns a combined list of incoming block_finalizers - get_incoming_finalizers(){return incoming_block_finalizers} ; - -} - -//quorum certificate -struct qc(){ - - //block candidate ID, acts as node message - block_id - - //aggregate signature of finalizers part of this qc - agg_sig - - //data structure which includes the list of signatories included in the aggregate, (for easy aggregate signature verification). It can also support dual_set finalization mode - sig_bitset - - //aggregate signature of incoming finalizers part of this qc, only present if we are operating in dual_set finalization mode - incoming_agg_sig; - - //data structure which includes the list of incoming signatories included in the aggregate (for easy verification), only present if we are operating in dual_set finalization mode - incoming_sig_bitset; - - //get block height from block_id - get_height() = ; //abstracted [...] - - //check if a quorum of valid signatures from active (incumbent) finalizers has been met according to me._threshold - quorum_met() = ; //abstracted [...] - - //check if a quorum of valid signatures from both active (incumbent) finalizers AND incoming finalizers has been met. Quorums are calculated for each of the incumbent and incoming sets separately, and both sets must independently achieve quorum for this function to return true - extended_quorum_met() = ;//abstracted [...] 
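    //Illustrative sketch only (not part of the original pseudocode): one possible shape for the two
    //abstracted quorum predicates above, assuming a hypothetical helper weight_of(bitset) that sums
    //the voting weight of the finalizers flagged in a bitset:
    //
    //  quorum_met()          { return weight_of(sig_bitset) >= me._threshold; }
    //  extended_quorum_met() { return weight_of(sig_bitset) >= me._threshold
    //                              && weight_of(incoming_sig_bitset) >= me._threshold; }
    //
    //The incumbent and incoming sets are checked independently, matching the dual_set description above.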
- -} - -//proposal -struct block_candidate(){ - - //previous field of block header - parent - - //list of actions to be executed - cmd - - //qc justification for this block - justify - - //block id, which also contains block height - block_id - - //return block height from block_id - get_height() = ; //abstracted [...]; - - //return the actual block this candidate wraps around, including header + transactions / actions - get_block() = ; //abstracted [...]; - -} - -//available msg types -enum msg_type { - new_view //used when leader rotation is required - new_block //used when proposer is different from leader - qc //progress - vote //vote by replicas -} - -// Internal book keeping variables - -//Hotstuff protocol - -me._v_height; //height of last voted node - -me._b_lock; //locked block_candidate -me._b_exec; //last committed block_candidate -me._b_leaf; //current block_candidate - -me._high_qc; //highest known QC - -me._dual_set_height; //dual set finalization mode active as of this block height, -1 if operating in single_set mode. A finalizer set change is successfully completed when a block is committed at the same or higher block height - -//chain data - -me._b_temporary; //temporary storage of received block_candidates. Pruning rules are abstracted - -me._schedule //current block producer schedule, mapped to new structure - - -//global configuration - -me._block_interval; //expected block time interval, default is 0.5 second -me._blocks_per_round; //numbers of blocks per round, default is 12 - -me._threshold; //configurable quorum threshold - - -//network_plugin protocol hooks and handlers - -//generic network message generation function -network_plugin.new_message(type, ...data){ - - new_message.type = type; - new_message[...] = ...data; - - return new_message; -} - -network_plugin.broadcast(msg){ - - //broadcasting to other nodes, replicas, etc. - - //nodes that are not part of consensus making (not proposer, finalizer or leader) relay consensus messages, but take no action on them - - //abstracted [...] - -} - -//on new_block message received event handler (coming from a proposer that is not leader) -network_plugin.on_new_block_received(block){ - - //abstracted [...] - - pacemaker.on_beat(block); //check if we are leader and need to create a view for this block - -} - -//on vote received event handler -network_plugin.on_vote_received(msg){ - - //abstracted [...] - - hotstuff.on_vote_received(msg); - -} - - - - - -//Pacemaker algorithm, regulating liveness - - -//on_beat(block) is called in the following cases : -//1) As a block proposer, when we generate a block_candidate -//2) As a view leader, when we receive a block_candidate from a proposer -pacemaker.on_beat(block){ - - am_i_proposer = me._schedule.get_proposer() == me; //am I proposer? - am_i_leader = me._schedule.get_leader() == me; //am I leader? - - if (!am_i_proposer && !am_i_leader) return; //replicas don't have to do anything here, unless they are also leader and/or proposer - - block_candidate = new_proposal_candidate(block); - - //if i'm the leader - if (am_i_leader){ - - if (!am_i_proposer){ - - //block validation hook - //abstracted [...] - - //If I am the leader but not the proposer, check if proposal is safe. 
- if(!hotstuff.is_node_safe(block_candidate)) return; - - } - - me._b_leaf = block_candidate; - - } - - if (am_i_leader) msg = new_message(qc, block_candidate); //if I'm leader, send qc message - else msg = new_message(new_block, block_candidate); //if I'm only proposer, send new_block message - - network_plugin.broadcast(msg); //broadcast message - -} - -//update high qc -pacemaker.update_high_qc(new_high_qc){ - - // if new high QC is higher than current, update to new - if (new_high_qc.get_height()>me._high_qc.block.get_height()){ - - me._high_qc = new_high_qc; - me._b_leaf = me._b_temporary.get(me._high_qc.block_id); - - } - -} - -pacemaker.on_msg_received(msg){ - - //p2p message relay logic - //abstracted [...] - - if (msg.type == new_view){ - pacemaker.update_high_qc(msg.high_qc); - } - else if (msg.type == qc){ - hotstuff.on_proposal_received(msg); - } - else if (msg.type == vote){ - hotstuff.on_vote_received(msg); - } -} - -//returns the proposer, according to schedule -pacemaker.get_proposer(){ - return schedule.get_proposer(); //currently active producer is proposer -} - -//returns the leader, according to schedule -pacemaker.get_leader(){ - return schedule.get_leader(); //currently active producer is leader -} - - -/* - - Corresponds to onNextSyncView in hotstuff paper. Handles both leader rotations as well as timeout if leader fails to progress - - Note : for maximum liveness, on_leader_rotate() should be called by replicas as early as possible when either : - - 1) no more blocks are expected before leader rotation occurs (eg: after receiving the final block expected from the current leader before the handoff) OR - - 2) if we reach (me._block_interval * (me._blocks_per_round - 1)) time into a specific view, and we haven't received the expected second to last block for this round. - - In scenarios where liveness is maintained, this relieves an incoming leader from having to wait until it has received n - f new_view messages at the beginning of a new view since it will already have the highest qc. - - In scenarios where liveness has been lost due to f + 1 faulty replicas, progress is impossible, so the safety rule rejects attempts at creating a qc until liveness has been restored. - -*/ - -pacemaker.on_leader_rotate(){ - - msg = new_message(new_view, me._high_qc); //add highest qc - - network_plugin.broadcast(msg); //broadcast message - -} - - - -//producer_plugin hook for block generation - -//on block produced event handler (block includes signature of proposer) -producer_plugin.on_block_produced(block){ - - //generate a new block extending from me._b_leaf - //abstracted [...] - - /* - - Include the highest qc we recorded so far. Nodes catching up or light clients have a proof that the block referred to as high qc is irreversible. 
- - We can merge the normal agg_sig / sig_bitset with the incoming_agg_sig / incoming_sig_bitset if the qc was generated in dual_set mode before we include the qc into the block, to save space - - */ - - block.qc = me._high_qc; - - pacemaker.on_beat(block); - -} - - - -//Hotstuff algorithm, regulating safety - -hotstuff.new_proposal_candidate(block) { - - b.parent = block.header.previous; - b.cmd = block.actions; - b.justify = me._high_qc; //or null if no _high_qc upon activation or chain launch - b.block_id = block.header.block_id(); - - //return block height from block_id - b.get_height() = //abstracted [...]; - - return b; -} - -//safenode predicate -hotstuff.is_node_safe(block_candidate){ - - monotony_check = false; - safety_check = false; - liveness_check = false; - - if (block_candidate.get_height() > me._v_height){ - monotony_check = true; - } - - if (me._b_lock){ - - //Safety check : check if this proposal extends the chain I'm locked on - if (extends(block_candidate, me._b_lock)){ - safety_check = true; - } - - //Liveness check : check if the height of this proposal's justification is higher than the height of the proposal I'm locked on. This allows restoration of liveness if a replica is locked on a stale block. - if (block_candidate.justify.get_height() > me._b_lock.get_height())){ - liveness_check = true; - } - - } - else { - - //if we're not locked on anything, means the protocol just activated or chain just launched - liveness_check = true; - safety_check = true; - } - - //Lemma 2 - return monotony_check && (liveness_check || safety_check); //return true if monotony check and at least one of liveness or safety check evaluated successfully - -} - -//verify if b_descendant extends a branch containing b_ancestor -hotstuff.extends(b_descendant, b_ancestor){ - - //in order to qualify as extending b_ancestor, b_descendant must descend from b_ancestor - //abstracted [...] - - return true || false; - -} - -//creates or get, then return the current qc for this block candidate -hotstuff.create_or_get_qc(block_candidate){ - - //retrieve or create unique QC for this stage, primary key is block_id - //abstracted [...] - - return qc; // V[] - -} - -//add a signature to a qc -hotstuff.add_to_qc(qc, finalizer, sig){ - - //update qc reference - - // V[b] - - if (schedule.get_finalizers.contains(finalizer) && !qc.sig_bitset.contains(finalizer)){ - qc.sig_bitset += finalizer; - qc.agg_sig += sig; - } - - if (schedule.get_incoming_finalizers.contains(finalizer) && !qc.incoming_sig_bitset.contains(finalizer)){ - qc.incoming_sig_bitset += finalizer; - qc.incoming_agg_sig += sig; - } - -} - -//when we receive a proposal -hotstuff.on_proposal_received(msg){ - - //block candidate validation hook (check if block is valid, etc.), return if not - //abstracted [...] 
- - /* - - First, we verify if we have already are aware of a proposal at this block height - - */ - - //Lemma 1 - stored_block = me._b_temporary.get(msg.block_candidate.get_height()); - - //check if I'm finalizer, in which case I will optionally sign and update my internal state - - am_i_finalizer = get_finalizers.contains(me) || get_incoming_finalizers(me); - - skip_sign = false; - - //If we already have a proposal at this height, we must not double sign so we skip signing, else we store the proposal and and we continue - if (stored_block) skip_sign = true; - else me._b_temporary.add(msg.block_candidate); //new proposal - - //if I am a finalizer for this proposal and allowed to sign, test safenode predicate for possible vote - if (am_i_finalizer && !skip_sign && hotstuff.is_node_safe(msg.block_candidate)){ - - me._v_height = msg.block_candidate.get_height(); - - /* - Sign message. - - In Hotstuff, we need to sign a tuple of (msg.view_type, msg.view_number and msg.node). - - In our implementation, the view_type is generic, and the view_number is contained in the block_id, which is also the message. - - Therefore, we can ensure uniqueness by replacing the view_type part of the tuple with msg.block_candidate.justify.agg_sig. - - The digest to sign now becomes the tuple (msg.block_candidate.justify.agg_sig, msg.block_candidate.block_id). - - */ - - sig = = _dual_set_height){ - quorum_met = qc.extended_quorum_met(); - } - else quorum_met = qc.quorum_met(); - - if (quorum_met){ - - pacemaker.update_high_qc(qc); - - } - -} - -//internal state update of replica -hotstuff.update(block_candidate){ - - b_new = block_candidate; - - b2 = me._b_temporary.get(b_new.justify.block_id); //first phase, prepare - b1 = me._b_temporary.get(b2.justify.block_id); //second phase, precommit - b = me._b_temporary.get(b1.justify.block_id); //third phase, commit - - //if a proposed command for the transition of the finalizer set is included in b_new's commands (for which we don't have a qc). Nothing special to do, but can be a useful status to be aware of for external APIs. - new_proposed_transition = ; //abstracted [...] - - //if a transition command of the finalizer set is included in b2's commands (on which we now have a qc), we now know n - f replicas approved the transition. If no other transition is currently pending, it becomes pending. - new_pending_transition = ; //abstracted [...] 
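    //Illustrative sketch only (not part of the original pseudocode): the two abstracted transition
    //checks above could be expressed with a hypothetical helper contains_finalizer_transition(cmd)
    //that scans a block candidate's commands, e.g.:
    //
    //  new_proposed_transition = contains_finalizer_transition(b_new.cmd);
    //  new_pending_transition  = contains_finalizer_transition(b2.cmd) && me._dual_set_height == -1;
    //
    //Only the second check (a qc exists on b2's commands and no other transition is pending) flips
    //the replica into dual_set mode below.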
- - if (new_pending_transition){ - me._dual_set_height = b_new.get_height() + 1; //if this block proves a quorum on a finalizer set transition, we now start using the extended_quorum_met() predicate until the transition is successfully completed - } - - //precommit phase on b2 - pacemaker.update_high_qc(block_candidate.justify); - - if (b1.get_height() > me._b_lock.get_height()){ - me._b_lock = b1; //commit phase on b1 - } - - //direct parent relationship verification - if (b2.parent == b1 && b1.parent == b){ - - //if we are currently operating in dual set mode reaching this point, and the block we are about to commit has a height higher or equal to me._dual_set_height, it means we have reached extended quorum on a view ready to be committed, so we can transition into single_set mode again, where the incoming finalizer set becomes the active finalizer set - if (me._dual_set_height != -1 && b.get_height() >= me._dual_set_height){ - - //sanity check to verify quorum on justification for b (b1), should always evaluate to true - if (b1.justify.extended_quorum_met()){ - - //reset internal state to single_set mode, with new finalizer set - me._schedule.block_finalizers = me_.schedule.incoming_finalizers; - me_.schedule.incoming_finalizers = null; - me._dual_set_height = -1; - - } - - } - - hotstuff.commit(b); - - me._b_exec = b; //decide phase on b - - } - -} - -//commit block and execute its actions against irreversible state -hotstuff.commit(block_candidate){ - - //check if block_candidate already committed, if so, return because there is nothing to do - - //can only commit newer blocks - if (me._b_exec.get_height() < block_candidate.get_height()){ - - parent_b = _b_temporary.get(block_candidate.parent); - - hotstuff.commit(parent_b); //recursively commit all non-committed ancestor blocks sequentially first - - //execute block cmd - //abstracted [...] - - } -} - - -/* - - Proofs : - - Safety : - - Lemma 1. Let b and w be two conflicting block_candidates such that b.get_height() = w.get_height(), then they cannot both have valid quorum certificates. - - Proof. Suppose they can, so both b and w receive 2f + 1 votes, among which there are at least f + 1 honest replicas - voting for each block_candidate, then there must be an honest replica that votes for both, which is impossible because b and w - are of the same height. - - This is enforced by the function labeled "Lemma 1". - - Lemma 2. Let b and w be two conflicting block_candidates. Then they cannot both become committed, each by an honest replica. - - Proof. We prove this lemma by contradiction. Let b and w be two conflicting block_candidates at different heights. - Assume during an execution, b becomes committed at some honest replica via the QC Three-Chain b. - - For this to happen, b must be the parent and justification of b1, b1 must be the parent and justification of b2 and b2 must be the justification of a new proposal b_new. - - Likewise w becomes committed at some honest replica via the QC Three-Chain w. - - For this to happen, w must be the parent and justification of w1, w1 must be the parent and justification of w2 and w2 must be the justification of a new proposal w_new. - - By lemma 1, since each of the block_candidates b, b1, b2, w, w1, w2 have QCs, then without loss of generality, we assume b.get_height() > w2.get_height(). - - We now denote by qc_s the QC for a block_candidate with the lowest height larger than w2.get_height(), that conflicts with w. 
- - Assuming such qc_s exists, for example by being the justification for b1. Let r denote a correct replica in the intersection of w_new.justify and qc_s. By assumption of minimality of qc_s, the lock that r has on w is not changed before qc_s is formed. Now, consider the invocation of on_proposal_received with a message carrying a conflicting block_candidate b_new such that b_new.block_id = qc_s.block_id. By assumption, the condition on the lock (see line labeled "Lemma 2") is false. - - On the other hand, the protocol requires t = b_new.justifty to be an ancestor of b_new. By minimality of qc_s, t.get_height() <= w2.get_height(). Since qc_s.block_id conflicts with w.block_id, t cannot be any of w, w1 or w2. Then, t.get_height() < w.get_height() so the other half of the disjunct is also false. Therefore, r will not vote for b_new, contradicting the assumption of r. - - Theorem 3. Let cmd1 and cmd2 be any two commands where cmd1 is executed before cmd2 by some honest replica, then any honest replica that executes cmd2 must execute cm1 before cmd2. - - Proof. Denote by w the node that carries cmd1, b carries cmd2. From Lemma 1, it is clear the committed nodes are at distinct heights. Without loss of generality, assume w.get_height() < b.height(). The commitment of w and b are handled by commit(w1) and commit(b1) in update(), where w is an ancestor of w1 and b is an ancestor of b1. According to Lemma 2, w1 must not conflict with b1, so w does not conflict with b. Then, w is an ancestor of b, and when any honest replica executes b, it must first execute w by the recursive logic in commit(). - - Liveness : - - In order to prove liveness, we first show that after GST, there is a bounded duration T_f such that if all correct replicas remain in view v during T_f and the leader for view v is correct, then a decision is reached. We define qc_1 and qc_2 as matching QCs if qc_1 and qc_2 are both valid and qc_1.block_id = qc_2.block_id. - - Lemma 4. If a correct replica is locked such that me._b_lock.justify = generic_qc_2, then at least f + 1 correct replicas voted for some generic_qc_1 matching me._b_lock.justify. - - Proof. Suppose replica r is locked on generic_qc_2. Then, (n-f) votes were cast for the matching generic_qc_1 in an earlier phase (see line labeled "Lemma 4"), out of which at least f + 1 were from correct replicas. - - Theorem 5. After GST, there exists a bounded time period T_f such that if all correct replicas remain in view v during - T_f and the leader for view v is correct, then a decision is reached. - - Proof. Starting in a new view, the leader has collected (n − f) new_view or vote messages and calculates its high_qc before - broadcasting a qc message. Suppose among all replicas (including the leader itself), the highest kept lock - is me._b_lock.justify = generic_qc_new_2. - - By Lemma 4, we know there are at least f + 1 correct replicas that voted for a generic_qc_new_1 matching generic_qc_new_2, and have already sent them to the leader in their new_view or vote messages. Thus, the leader must learn a matching generic_qc_new_2 in at least one of these new_view or vote messages and use it as high_qc in its initial qc message for this view. By the assumption, all correct replicas are synchronized in their view and the leader is non-faulty. Therefore, all correct replicas will vote at a specific height, since in is_node_safe(), the condition on the line labeled "Liveness check" is satisfied. 
This is also the case if the block_id in the message conflicts with a replica’s stale me._b_lock.justify.block_id, such that the condition on the line labeled "Safety check" is evaluated to false. - - Then, after the leader has a valid generic_qc for this view, all replicas will vote at all the following heights, leading to a new commit decision at every step. After GST, the duration T_f for the steps required to achieve finality is of bounded length. - - The protocol is Optimistically Responsive because there is no explicit “wait-for-∆” step, and the logical disjunction in is_node_safe() is used to override a stale lock with the help of the Three-Chain paradigm. - - Accountability and finality violation : - - Let us define b_descendant as a descendant of b_root, such that hotstuff.extends(b_descendant, b_root) returns true. - - Suppose b_descendant's block header includes a high_qc field representing a 2f + 1 vote on b_root. When we become aware of a new block where the high_qc points to b_descendant or to one of b_descendant's descendants, we know b_root, as well as all of b_root's ancestors, have been committed and are final. - - Theorem 6. Let b_root and w_root be two conflicting block_candidates of the same height, such that hotstuff.extends(b_root, w_root) and hotstuff.extends(w_root, b_root) both return false, and that b_root.get_height() == w_root.get_height(). Then they cannot each have a valid quorum certificate unless a finality violation has occurred. In the case of such finality violation, any party in possession of b_root and w_root would be able to prove complicity or exonerate block finalizers having taken part or not in causing the finality violation. - - Proof. Let b_descendant and w_descendant be descendants of respectively b_root and w_root, such that hotstuff.extends(b_descendant, b_root) and hotstuff.extends(w_descendant, w_root) both return true. - - By Lemma 1, we know that a correct replica cannot sign two conflicting block candidates at the same height. - - For each of b_root and w_root, we can identify and verify the signatures of finalizers, by ensuring the justification's agg_sig matches the aggregate key calculated from the sig_bitset and the schedule. - - Therefore, for b_root and w_root to both be included as qc justification into descendant blocks, at least one correct replica must have signed two vote messages on conflicting block candidates at the same height, which is impossible due to the checks performed in the function with comment "Lemma 1". Such an event would be a finality violation. - - For a finality violation to occur, the intersection of the finalizers that have voted for both b_root and w_root, as evidenced by the high_qc of b_descendant and w_descendant must represent a minimum of f + 1 faulty nodes. - - By holding otherwise valid blocks where a qc for b_root and w_root exist, the finality violation can be proved trivially, simply by calculating the intersection and the symmetrical difference of the finalizer sets having voted for these two proposals. The finalizers contained in the intersection can therefore be blamed for the finality violation. The symmetric difference of finalizers that have voted for either proposal but not for both can be exonerated from wrong doing, thus satisfying the Accountability property requirement. - - Finalizer set transition (safety proof) : - - Replicas can operate in either single_set or dual_set validation mode. In single_set mode, quorum is calculated and evaluated only for the active finalizer set. 
In dual_set mode, independant quorums are calculated over each of the active (incumbent) finalizer set and the incoming finalizer set, and are evaluated separately. - - Let us define active_set as the active finalizer set, as determined by the pacemaker at any given point while a replica is operating in single_set mode. The active_set is known to all active replicas that are in sync. While operating in single_set mode, verification of quorum on proposals is achieved through the use of the active_set.quorum_met() predicate. - - Let us define incumbent_set and incoming_set as, respectively, the previously active_set and a new proposed set of finalizers, starting at a point in time when a replica becomes aware of a quorum on a block containing a finalizer set transition proposal. This triggers the transition into dual_set mode for this replica. - - As the replica is operating in dual_set mode, the quorum_met() predicate used in single_set mode is temporarily replaced with the extended_quorum_met() predicate, which only returns true if (incumbent_set.quorum_met() AND incoming_set.quorum_met()). - - As we demonstrated in Lemma 1, Lemma 2 and Theorem 3, the protocol is safe when n - f correct replicas achieve quorum on proposals. - - Therefore, no safety is lost as we are transitioning into dual_set mode, since this transition only adds to the quorum constraints guaranteeing safety. However, this comes at the cost of decreased plausible liveness, because of the additional constraint of also requiring the incoming finalizer set to reach quorum in order to progress. //todo : discuss possible recovery from incoming finalizer set liveness failure - - Theorem 7. A replica can only operate in either single_set mode or in dual_set mode. While operating in dual_set mode, the constraints guaranteeing safety of single_set mode still apply, and thus the dual_set mode constraints guaranteeing safety can only be equally or more restrictive than when operating in single_set mode. - - Proof. Suppose a replica is presented with a proposal b_new, which contains a qc on a previous proposal b_old such that hotstuff.extends(b_new, b_old) returns true, and that the replica could operate in both single_set mode and dual_set mode at the same time, in such a way that active_set == incumbent_set and that an unknown incoming_set also exists. - - As it needs to verify the qc, the replica invokes both quorum_met() and extended_quorum_met() predicates. - - It follows that, since active_set == incumbent_set, and that active_set.quorum_met() is evaluated in single_set mode, and incumbent_set.quorum_met() is evaluated as part of the extended_quorum_met() predicate in dual_set mode, the number of proposals where (incumbent_set.quorum_met() AND incoming_set.quorum_met()) is necessarily equal or smaller than the number of proposals where active_set.quorum_met(). In addition, any specific proposal where active_set.quorum_met() is false would also imply (incumbent_set.quorum_met() AND incoming_set.quorum_met()) is false as well. - - Therefore, the safety property is not weakened while transitioning into dual_set mode. 
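   As an illustrative sketch only (not part of the specification above): the mode-dependent verification discussed in Theorem 7 can be summarized by a single predicate, here given the hypothetical name quorum_met_for_mode, mirroring the check a replica performs before calling pacemaker.update_high_qc():

      quorum_met_for_mode(qc, height){
          if (me._dual_set_height != -1 && height >= me._dual_set_height) return qc.extended_quorum_met();
          else return qc.quorum_met();
      }

   Because the dual_set branch simply conjoins a second quorum check onto the single_set rule, the set of proposals it accepts is a subset of those single_set mode would accept, which is the sense in which the transition cannot weaken safety.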
- -*/ - - - diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index f0227b4aa9..048df201cc 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -97,6 +97,8 @@ add_library( eosio_chain transaction.cpp block.cpp block_header.cpp + block_header_state.cpp + block_state.cpp block_header_state_legacy.cpp block_state_legacy.cpp fork_database.cpp diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index c51d14b46c..e11cf9da39 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -634,7 +634,7 @@ namespace eosio { namespace chain { _variant_to_binary(type, var, ds, ctx); } - void impl::abi_to_variant::add_block_header_instant_finality_extension( mutable_variant_object& mvo, const flat_multimap& header_exts ) { + void impl::abi_to_variant::add_block_header_instant_finality_extension( mutable_variant_object& mvo, const header_extension_multimap& header_exts ) { if (header_exts.count(instant_finality_extension::extension_id())) { const auto& if_extension = std::get(header_exts.lower_bound(instant_finality_extension::extension_id())->second); diff --git a/libraries/chain/block_header.cpp b/libraries/chain/block_header.cpp index d02018a153..894306586f 100644 --- a/libraries/chain/block_header.cpp +++ b/libraries/chain/block_header.cpp @@ -25,7 +25,7 @@ namespace eosio { namespace chain { return result; } - flat_multimap block_header::validate_and_extract_header_extensions()const { + header_extension_multimap block_header::validate_and_extract_header_extensions()const { using decompose_t = block_header_extension_types::decompose_t; flat_multimap results; diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp new file mode 100644 index 0000000000..c73b39a135 --- /dev/null +++ b/libraries/chain/block_header_state.cpp @@ -0,0 +1,202 @@ +#include +#include +#include + +namespace eosio::chain { + +block_header_state_core block_header_state_core::next(uint32_t last_qc_block_height, bool is_last_qc_strong) const { + // no state change if last_qc_block_height is the same + if (last_qc_block_height == this->last_qc_block_height) { + return {*this}; + } + + EOS_ASSERT(last_qc_block_height > this->last_qc_block_height, block_validate_exception, + "new last_qc_block_height must be greater than old last_qc_block_height"); + + auto old_last_qc_block_height = this->last_qc_block_height; + auto old_final_on_strong_qc_block_height = this->final_on_strong_qc_block_height; + + block_header_state_core result{*this}; + + if (is_last_qc_strong) { + // last QC is strong. We can progress forward. + + // block with old final_on_strong_qc_block_height becomes irreversible + if (old_final_on_strong_qc_block_height.has_value()) { + result.last_final_block_height = *old_final_on_strong_qc_block_height; + } + + // next block which can become irreversible is the block with + // old last_qc_block_height + if (old_last_qc_block_height.has_value()) { + result.final_on_strong_qc_block_height = *old_last_qc_block_height; + } + } else { + // new final_on_strong_qc_block_height should not be present + result.final_on_strong_qc_block_height.reset(); + + // new last_final_block_height should be the same as the old last_final_block_height + } + + // new last_qc_block_height is always the input last_qc_block_height. 
+ result.last_qc_block_height = last_qc_block_height; + + return result; +} + + +block_header_state block_header_state::next(const block_header_state_input& data) const { + block_header_state result; + +#if 0 + if (when != block_timestamp_type()) { + EOS_ASSERT(when > header.timestamp, block_validate_exception, "next block must be in the future"); + } else { + (when = header.timestamp).slot++; + } + result.block_num = block_num + 1; + result.previous = id; + result.timestamp = when; + result.active_schedule_version = active_schedule.version; + result.prev_activated_protocol_features = activated_protocol_features; + + auto proauth = get_scheduled_producer(when); + + result.valid_block_signing_authority = proauth.authority; + result.producer = proauth.producer_name; + result.last_proposed_finalizer_policy_generation = last_proposed_finalizer_policy_generation; + + result.blockroot_merkle = blockroot_merkle; + result.blockroot_merkle.append(id); + + result.prev_pending_schedule = pending_schedule; + + if (hotstuff_activated) { + result.confirmed = hs_block_confirmed; + result.dpos_proposed_irreversible_blocknum = 0; + // fork_database will prefer hotstuff blocks over dpos blocks + result.dpos_irreversible_blocknum = hs_dpos_irreversible_blocknum; + // Change to active on the next().next() producer block_num + // TODO: use calculated hotstuff lib instead of block_num + if (pending_schedule.schedule.producers.size() && + block_num >= detail::get_next_next_round_block_num(when, pending_schedule.schedule_lib_num)) { + result.active_schedule = pending_schedule.schedule; + result.was_pending_promoted = true; + } else { + result.active_schedule = active_schedule; + } + + } else { + auto itr = producer_to_last_produced.find(proauth.producer_name); + if (itr != producer_to_last_produced.end()) { + EOS_ASSERT(itr->second < (block_num + 1) - num_prev_blocks_to_confirm, producer_double_confirm, + "producer ${prod} double-confirming known range", + ("prod", proauth.producer_name)("num", block_num + 1)("confirmed", num_prev_blocks_to_confirm)( + "last_produced", itr->second)); + } + + result.confirmed = num_prev_blocks_to_confirm; + + /// grow the confirmed count + static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, + "8bit confirmations may not be able to hold all of the needed confirmations"); + + // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms + // _this_ block + auto num_active_producers = active_schedule.producers.size(); + uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; + + if (confirm_count.size() < config::maximum_tracked_dpos_confirmations) { + result.confirm_count.reserve(confirm_count.size() + 1); + result.confirm_count = confirm_count; + result.confirm_count.resize(confirm_count.size() + 1); + result.confirm_count.back() = (uint8_t)required_confs; + } else { + result.confirm_count.resize(confirm_count.size()); + memcpy(&result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1); + result.confirm_count.back() = (uint8_t)required_confs; + } + + auto new_dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; + + int32_t i = (int32_t)(result.confirm_count.size() - 1); + uint32_t blocks_to_confirm = num_prev_blocks_to_confirm + 1; /// confirm the head block too + while (i >= 0 && blocks_to_confirm) { + --result.confirm_count[i]; + // idump((confirm_count[i])); + if (result.confirm_count[i] == 0) { + uint32_t block_num_for_i = result.block_num - 
(uint32_t)(result.confirm_count.size() - 1 - i); + new_dpos_proposed_irreversible_blocknum = block_num_for_i; + // idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); + + if (i == static_cast(result.confirm_count.size() - 1)) { + result.confirm_count.resize(0); + } else { + memmove(&result.confirm_count[0], &result.confirm_count[i + 1], result.confirm_count.size() - i - 1); + result.confirm_count.resize(result.confirm_count.size() - i - 1); + } + + break; + } + --i; + --blocks_to_confirm; + } + + result.dpos_proposed_irreversible_blocknum = new_dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = calc_dpos_last_irreversible(proauth.producer_name); + + if (pending_schedule.schedule.producers.size() && + result.dpos_irreversible_blocknum >= pending_schedule.schedule_lib_num) { + result.active_schedule = pending_schedule.schedule; + + flat_map new_producer_to_last_produced; + + for (const auto& pro : result.active_schedule.producers) { + if (pro.producer_name == proauth.producer_name) { + new_producer_to_last_produced[pro.producer_name] = result.block_num; + } else { + auto existing = producer_to_last_produced.find(pro.producer_name); + if (existing != producer_to_last_produced.end()) { + new_producer_to_last_produced[pro.producer_name] = existing->second; + } else { + new_producer_to_last_produced[pro.producer_name] = result.dpos_irreversible_blocknum; + } + } + } + new_producer_to_last_produced[proauth.producer_name] = result.block_num; + + result.producer_to_last_produced = std::move(new_producer_to_last_produced); + + flat_map new_producer_to_last_implied_irb; + + for (const auto& pro : result.active_schedule.producers) { + if (pro.producer_name == proauth.producer_name) { + new_producer_to_last_implied_irb[pro.producer_name] = dpos_proposed_irreversible_blocknum; + } else { + auto existing = producer_to_last_implied_irb.find(pro.producer_name); + if (existing != producer_to_last_implied_irb.end()) { + new_producer_to_last_implied_irb[pro.producer_name] = existing->second; + } else { + new_producer_to_last_implied_irb[pro.producer_name] = result.dpos_irreversible_blocknum; + } + } + } + + result.producer_to_last_implied_irb = std::move(new_producer_to_last_implied_irb); + + result.was_pending_promoted = true; + } else { + result.active_schedule = active_schedule; + result.producer_to_last_produced = producer_to_last_produced; + result.producer_to_last_produced[proauth.producer_name] = result.block_num; + result.producer_to_last_implied_irb = producer_to_last_implied_irb; + result.producer_to_last_implied_irb[proauth.producer_name] = dpos_proposed_irreversible_blocknum; + } + } // !hotstuff_activated +#endif + + return result; +} + + +} // namespace eosio::chain \ No newline at end of file diff --git a/libraries/chain/block_header_state_legacy.cpp b/libraries/chain/block_header_state_legacy.cpp index b7b21ae569..156efb4d24 100644 --- a/libraries/chain/block_header_state_legacy.cpp +++ b/libraries/chain/block_header_state_legacy.cpp @@ -23,54 +23,6 @@ namespace eosio { namespace chain { } } - block_header_state_core::block_header_state_core( uint32_t last_final_block_height, - std::optional final_on_strong_qc_block_height, - std::optional last_qc_block_height ) - : - last_final_block_height(last_final_block_height), - final_on_strong_qc_block_height(final_on_strong_qc_block_height), - last_qc_block_height(last_qc_block_height) {} - - block_header_state_core block_header_state_core::next( uint32_t last_qc_block_height, - bool is_last_qc_strong) { - // no state 
change if last_qc_block_height is the same - if( last_qc_block_height == this->last_qc_block_height ) { - return {*this}; - } - - EOS_ASSERT( last_qc_block_height > this->last_qc_block_height, block_validate_exception, "new last_qc_block_height must be greater than old last_qc_block_height" ); - - auto old_last_qc_block_height = this->last_qc_block_height; - auto old_final_on_strong_qc_block_height = this->final_on_strong_qc_block_height; - - block_header_state_core result{*this}; - - if( is_last_qc_strong ) { - // last QC is strong. We can progress forward. - - // block with old final_on_strong_qc_block_height becomes irreversible - if( old_final_on_strong_qc_block_height.has_value() ) { - result.last_final_block_height = *old_final_on_strong_qc_block_height; - } - - // next block which can become irreversible is the block with - // old last_qc_block_height - if( old_last_qc_block_height.has_value() ) { - result.final_on_strong_qc_block_height = *old_last_qc_block_height; - } - } else { - // new final_on_strong_qc_block_height should not be present - result.final_on_strong_qc_block_height.reset(); - - // new last_final_block_height should be the same as the old last_final_block_height - } - - // new last_qc_block_height is always the input last_qc_block_height. - result.last_qc_block_height = last_qc_block_height; - - return result; - } - producer_authority block_header_state_legacy::get_scheduled_producer( block_timestamp_type t )const { auto index = t.slot % (active_schedule.producers.size() * config::producer_repetitions); index /= config::producer_repetitions; @@ -95,7 +47,6 @@ namespace eosio { namespace chain { // If hotstuff_activated then use new consensus values and simpler active schedule update. // If notstuff is not activated then use previous pre-hotstuff consensus logic. 
pending_block_header_state_legacy block_header_state_legacy::next( block_timestamp_type when, - bool hotstuff_activated, uint16_t num_prev_blocks_to_confirm )const { pending_block_header_state_legacy result; @@ -123,128 +74,111 @@ namespace eosio { namespace chain { result.prev_pending_schedule = pending_schedule; - if (hotstuff_activated) { - result.confirmed = hs_block_confirmed; - result.dpos_proposed_irreversible_blocknum = 0; - // fork_database will prefer hotstuff blocks over dpos blocks - result.dpos_irreversible_blocknum = hs_dpos_irreversible_blocknum; - // Change to active on the next().next() producer block_num - // TODO: use calculated hotstuff lib instead of block_num - if( pending_schedule.schedule.producers.size() && - block_num >= detail::get_next_next_round_block_num(when, pending_schedule.schedule_lib_num)) { - result.active_schedule = pending_schedule.schedule; - result.was_pending_promoted = true; - } else { - result.active_schedule = active_schedule; - } - + auto itr = producer_to_last_produced.find( proauth.producer_name ); + if( itr != producer_to_last_produced.end() ) { + EOS_ASSERT( itr->second < (block_num+1) - num_prev_blocks_to_confirm, producer_double_confirm, + "producer ${prod} double-confirming known range", + ("prod", proauth.producer_name)("num", block_num+1) + ("confirmed", num_prev_blocks_to_confirm)("last_produced", itr->second) ); + } + + result.confirmed = num_prev_blocks_to_confirm; + + /// grow the confirmed count + static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); + + // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms _this_ block + auto num_active_producers = active_schedule.producers.size(); + uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; + + if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { + result.confirm_count.reserve( confirm_count.size() + 1 ); + result.confirm_count = confirm_count; + result.confirm_count.resize( confirm_count.size() + 1 ); + result.confirm_count.back() = (uint8_t)required_confs; } else { - auto itr = producer_to_last_produced.find( proauth.producer_name ); - if( itr != producer_to_last_produced.end() ) { - EOS_ASSERT( itr->second < (block_num+1) - num_prev_blocks_to_confirm, producer_double_confirm, - "producer ${prod} double-confirming known range", - ("prod", proauth.producer_name)("num", block_num+1) - ("confirmed", num_prev_blocks_to_confirm)("last_produced", itr->second) ); - } - - result.confirmed = num_prev_blocks_to_confirm; - - /// grow the confirmed count - static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); - - // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms _this_ block - auto num_active_producers = active_schedule.producers.size(); - uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; - - if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { - result.confirm_count.reserve( confirm_count.size() + 1 ); - result.confirm_count = confirm_count; - result.confirm_count.resize( confirm_count.size() + 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } else { - result.confirm_count.resize( confirm_count.size() ); - memcpy( &result.confirm_count[0], &confirm_count[1], 
confirm_count.size() - 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } - - auto new_dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; - - int32_t i = (int32_t)(result.confirm_count.size() - 1); - uint32_t blocks_to_confirm = num_prev_blocks_to_confirm + 1; /// confirm the head block too - while( i >= 0 && blocks_to_confirm ) { - --result.confirm_count[i]; - //idump((confirm_count[i])); - if( result.confirm_count[i] == 0 ) - { - uint32_t block_num_for_i = result.block_num - (uint32_t)(result.confirm_count.size() - 1 - i); - new_dpos_proposed_irreversible_blocknum = block_num_for_i; - //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); - - if (i == static_cast(result.confirm_count.size() - 1)) { - result.confirm_count.resize(0); - } else { - memmove( &result.confirm_count[0], &result.confirm_count[i + 1], result.confirm_count.size() - i - 1); - result.confirm_count.resize( result.confirm_count.size() - i - 1 ); - } - - break; - } - --i; - --blocks_to_confirm; - } + result.confirm_count.resize( confirm_count.size() ); + memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); + result.confirm_count.back() = (uint8_t)required_confs; + } - result.dpos_proposed_irreversible_blocknum = new_dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( proauth.producer_name ); + auto new_dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; - if( pending_schedule.schedule.producers.size() && - result.dpos_irreversible_blocknum >= pending_schedule.schedule_lib_num ) + int32_t i = (int32_t)(result.confirm_count.size() - 1); + uint32_t blocks_to_confirm = num_prev_blocks_to_confirm + 1; /// confirm the head block too + while( i >= 0 && blocks_to_confirm ) { + --result.confirm_count[i]; + //idump((confirm_count[i])); + if( result.confirm_count[i] == 0 ) { - result.active_schedule = pending_schedule.schedule; - - flat_map new_producer_to_last_produced; - - for( const auto& pro : result.active_schedule.producers ) { - if( pro.producer_name == proauth.producer_name ) { - new_producer_to_last_produced[pro.producer_name] = result.block_num; + uint32_t block_num_for_i = result.block_num - (uint32_t)(result.confirm_count.size() - 1 - i); + new_dpos_proposed_irreversible_blocknum = block_num_for_i; + //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); + + if (i == static_cast(result.confirm_count.size() - 1)) { + result.confirm_count.resize(0); + } else { + memmove( &result.confirm_count[0], &result.confirm_count[i + 1], result.confirm_count.size() - i - 1); + result.confirm_count.resize( result.confirm_count.size() - i - 1 ); + } + + break; + } + --i; + --blocks_to_confirm; + } + + result.dpos_proposed_irreversible_blocknum = new_dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( proauth.producer_name ); + + if( pending_schedule.schedule.producers.size() && + result.dpos_irreversible_blocknum >= pending_schedule.schedule_lib_num ) + { + result.active_schedule = pending_schedule.schedule; + + flat_map new_producer_to_last_produced; + + for( const auto& pro : result.active_schedule.producers ) { + if( pro.producer_name == proauth.producer_name ) { + new_producer_to_last_produced[pro.producer_name] = result.block_num; + } else { + auto existing = producer_to_last_produced.find( pro.producer_name ); + if( existing != producer_to_last_produced.end() ) { + new_producer_to_last_produced[pro.producer_name] = 
existing->second; } else { - auto existing = producer_to_last_produced.find( pro.producer_name ); - if( existing != producer_to_last_produced.end() ) { - new_producer_to_last_produced[pro.producer_name] = existing->second; - } else { - new_producer_to_last_produced[pro.producer_name] = result.dpos_irreversible_blocknum; - } + new_producer_to_last_produced[pro.producer_name] = result.dpos_irreversible_blocknum; } } - new_producer_to_last_produced[proauth.producer_name] = result.block_num; - - result.producer_to_last_produced = std::move( new_producer_to_last_produced ); - - flat_map new_producer_to_last_implied_irb; - - for( const auto& pro : result.active_schedule.producers ) { - if( pro.producer_name == proauth.producer_name ) { - new_producer_to_last_implied_irb[pro.producer_name] = dpos_proposed_irreversible_blocknum; + } + new_producer_to_last_produced[proauth.producer_name] = result.block_num; + + result.producer_to_last_produced = std::move( new_producer_to_last_produced ); + + flat_map new_producer_to_last_implied_irb; + + for( const auto& pro : result.active_schedule.producers ) { + if( pro.producer_name == proauth.producer_name ) { + new_producer_to_last_implied_irb[pro.producer_name] = dpos_proposed_irreversible_blocknum; + } else { + auto existing = producer_to_last_implied_irb.find( pro.producer_name ); + if( existing != producer_to_last_implied_irb.end() ) { + new_producer_to_last_implied_irb[pro.producer_name] = existing->second; } else { - auto existing = producer_to_last_implied_irb.find( pro.producer_name ); - if( existing != producer_to_last_implied_irb.end() ) { - new_producer_to_last_implied_irb[pro.producer_name] = existing->second; - } else { - new_producer_to_last_implied_irb[pro.producer_name] = result.dpos_irreversible_blocknum; - } + new_producer_to_last_implied_irb[pro.producer_name] = result.dpos_irreversible_blocknum; } } - - result.producer_to_last_implied_irb = std::move( new_producer_to_last_implied_irb ); - - result.was_pending_promoted = true; - } else { - result.active_schedule = active_schedule; - result.producer_to_last_produced = producer_to_last_produced; - result.producer_to_last_produced[proauth.producer_name] = result.block_num; - result.producer_to_last_implied_irb = producer_to_last_implied_irb; - result.producer_to_last_implied_irb[proauth.producer_name] = dpos_proposed_irreversible_blocknum; } - } // !hotstuff_activated + + result.producer_to_last_implied_irb = std::move( new_producer_to_last_implied_irb ); + + result.was_pending_promoted = true; + } else { + result.active_schedule = active_schedule; + result.producer_to_last_produced = producer_to_last_produced; + result.producer_to_last_produced[proauth.producer_name] = result.block_num; + result.producer_to_last_implied_irb = producer_to_last_implied_irb; + result.producer_to_last_implied_irb[proauth.producer_name] = dpos_proposed_irreversible_blocknum; + } return result; } @@ -271,7 +205,7 @@ namespace eosio { namespace chain { emplace_extension( h.header_extensions, protocol_feature_activation::extension_id(), - fc::raw::pack( protocol_feature_activation{ std::move(new_protocol_feature_activations) } ) + fc::raw::pack( protocol_feature_activation{ .protocol_features=std::move(new_protocol_feature_activations) } ) ); } @@ -303,9 +237,7 @@ namespace eosio { namespace chain { block_header_state_legacy pending_block_header_state_legacy::_finish_next( const signed_block_header& h, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator + validator_t& validator )&& 
{ @@ -400,9 +332,7 @@ namespace eosio { namespace chain { const signed_block_header& h, vector&& additional_signatures, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator, + validator_t& validator, bool skip_validate_signee )&& { @@ -429,9 +359,7 @@ namespace eosio { namespace chain { block_header_state_legacy pending_block_header_state_legacy::finish_next( signed_block_header& h, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator, + validator_t& validator, const signer_callback_type& signer )&& { @@ -462,12 +390,10 @@ namespace eosio { namespace chain { vector&& _additional_signatures, const protocol_feature_set& pfs, bool hotstuff_activated, - const std::function&, - const vector& )>& validator, + validator_t& validator, bool skip_validate_signee )const { - return next( h.timestamp, hotstuff_activated, h.confirmed ).finish_next( h, std::move(_additional_signatures), pfs, validator, skip_validate_signee ); + return next( h.timestamp, h.confirmed ).finish_next( h, std::move(_additional_signatures), pfs, validator, skip_validate_signee ); } digest_type block_header_state_legacy::sig_digest()const { diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp new file mode 100644 index 0000000000..4c43eadc12 --- /dev/null +++ b/libraries/chain/block_state.cpp @@ -0,0 +1,106 @@ +#include +#include + +namespace eosio::chain { + + namespace { + constexpr auto additional_sigs_eid = additional_block_signatures_extension::extension_id(); + + /** + * Given a complete signed block, extract the validated additional signatures if present; + * + * @param b complete signed block + * @param pfs protocol feature set for digest access + * @param pfa activated protocol feature set to determine if extensions are allowed + * @return the list of additional signatures + * @throws if additional signatures are present before being supported by protocol feature activations + */ + vector extract_additional_signatures( const signed_block_ptr& b, + const protocol_feature_set& pfs, + const protocol_feature_activation_set_ptr& pfa ) + { + auto exts = b->validate_and_extract_extensions(); + + if ( exts.count(additional_sigs_eid) > 0 ) { + auto& additional_sigs = std::get(exts.lower_bound(additional_sigs_eid)->second); + + return std::move(additional_sigs.signatures); + } + + return {}; + } + + /** + * Given a pending block header state, wrap the promotion to a block header state such that additional signatures + * can be allowed based on activations *prior* to the promoted block and properly injected into the signed block + * that is previously constructed and mutated by the promotion + * + * This cleans up lifetime issues involved with accessing activated protocol features and moving from the + * pending block header state + * + * @param cur the pending block header state to promote + * @param b the signed block that will receive signatures during this process + * @param pfs protocol feature set for digest access + * @param extras all the remaining parameters that pass through + * @return the block header state + * @throws if the block was signed with multiple signatures before the extension is allowed + */ + + template + block_header_state inject_additional_signatures(block_header_state&& cur, + signed_block& b, + const protocol_feature_set& pfs, + Extras&& ... 
extras) + { + + block_header_state result; +#if 0 + result = std::move(cur).finish_next(b, pfs, std::forward(extras)...); + auto pfa = cur.prev_activated_protocol_features; + + if (!result.additional_signatures.empty()) { + bool wtmsig_enabled = detail::is_builtin_activated(pfa, pfs, builtin_protocol_feature_t::wtmsig_block_signatures); + + EOS_ASSERT(wtmsig_enabled, block_validate_exception, + "Block has multiple signatures before activation of WTMsig Block Signatures"); + + // as an optimization we don't copy this out into the legitimate extension structure as it serializes + // the same way as the vector of signatures + static_assert(fc::reflector::total_member_count == 1); + static_assert(std::is_same_v>); + + emplace_extension(b.block_extensions, additional_sigs_eid, fc::raw::pack( result.additional_signatures )); + } +#endif + return result; + } + + } +#if 0 + + block_state::block_state(const block_header_state& prev, + signed_block_ptr b, + const protocol_feature_set& pfs, + bool hotstuff_activated, + const validator_t& validator, + bool skip_validate_signee + ) + :block_header_state( prev.next( *b, extract_additional_signatures(b, pfs, prev.activated_protocol_features), pfs, hotstuff_activated, validator, skip_validate_signee ) ) + ,block( std::move(b) ) + {} + + block_state::block_state(pending_block_header_state&& cur, + signed_block_ptr&& b, + deque&& trx_metas, + const protocol_feature_set& pfs, + const validator_t& validator, + const signer_callback_type& signer + ) + :block_header_state( inject_additional_signatures( std::move(cur), *b, pfs, validator, signer ) ) + ,block( std::move(b) ) + ,_pub_keys_recovered( true ) // called by produce_block so signature recovery of trxs must have been done + ,_cached_trxs( std::move(trx_metas) ) + {} +#endif + +} /// eosio::chain diff --git a/libraries/chain/block_state_legacy.cpp b/libraries/chain/block_state_legacy.cpp index 493ba45af3..7b303cfd91 100644 --- a/libraries/chain/block_state_legacy.cpp +++ b/libraries/chain/block_state_legacy.cpp @@ -78,9 +78,7 @@ namespace eosio { namespace chain { signed_block_ptr b, const protocol_feature_set& pfs, bool hotstuff_activated, - const std::function&, - const vector& )>& validator, + const validator_t& validator, bool skip_validate_signee ) :block_header_state_legacy( prev.next( *b, extract_additional_signatures(b, pfs, prev.activated_protocol_features), pfs, hotstuff_activated, validator, skip_validate_signee ) ) @@ -91,9 +89,7 @@ namespace eosio { namespace chain { signed_block_ptr&& b, deque&& trx_metas, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator, + const validator_t& validator, const signer_callback_type& signer ) :block_header_state_legacy( inject_additional_signatures( std::move(cur), *b, pfs, validator, signer ) ) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index ea690241e6..5b44b322d4 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -43,7 +43,7 @@ #include #include -namespace eosio { namespace chain { +namespace eosio::chain { using resource_limits::resource_limits_manager; @@ -75,7 +75,7 @@ class maybe_session { public: maybe_session() = default; - maybe_session( maybe_session&& other) + maybe_session( maybe_session&& other) noexcept :_session(std::move(other._session)) { } @@ -116,106 +116,487 @@ class maybe_session { std::optional _session; }; -struct building_block { - building_block( const block_header_state_legacy& prev, - block_timestamp_type when, - bool 
hotstuff_activated, - uint16_t num_prev_blocks_to_confirm, - const vector& new_protocol_feature_activations ) - :_pending_block_header_state_legacy( prev.next( when, hotstuff_activated, num_prev_blocks_to_confirm ) ) - ,_new_protocol_feature_activations( new_protocol_feature_activations ) - ,_trx_mroot_or_receipt_digests( digests_t{} ) - {} +struct completed_block { + std::variant bsp; + + bool is_dpos() const { return std::holds_alternative(bsp); } + + deque extract_trx_metas() { + return std::visit([](auto& bsp) { return bsp->extract_trxs_metas(); }, bsp); + } - pending_block_header_state_legacy _pending_block_header_state_legacy; - std::optional _new_pending_producer_schedule; - vector _new_protocol_feature_activations; - size_t _num_new_protocol_features_that_have_activated = 0; - deque _pending_trx_metas; - deque _pending_trx_receipts; // boost deque in 1.71 with 1024 elements performs better - std::variant _trx_mroot_or_receipt_digests; - digests_t _action_receipt_digests; + const flat_set& get_activated_protocol_features() const { + return std::visit([](const auto& bsp) -> const flat_set& { + return bsp->get_activated_protocol_features()->protocol_features; }, bsp); + } + + uint32_t block_num() const { return std::visit([](const auto& bsp) { return bsp->block_num(); }, bsp); } + + block_timestamp_type timestamp() const { + return std::visit([](const auto& bsp) { return bsp->timestamp(); }, bsp); + } + + account_name producer() const { + return std::visit([](const auto& bsp) { return bsp->producer(); }, bsp); + } + + const producer_authority_schedule& active_producers() const { + return std::visit([](const auto& bsp) -> const producer_authority_schedule& { return bsp->active_schedule_auth(); }, bsp); + } + + const producer_authority_schedule& pending_producers() const { + return std::visit([](const auto& bsp) -> const producer_authority_schedule& { return bsp->pending_schedule_auth();}, bsp); + } + + bool is_protocol_feature_activated(const digest_type& digest) const { + const auto& activated_features = get_activated_protocol_features(); + return (activated_features.find(digest) != activated_features.end()); + } + + const block_signing_authority& pending_block_signing_authority() const { + return std::visit(overloaded{[](const block_state_legacy_ptr& bsp) -> const block_signing_authority& { + return bsp->valid_block_signing_authority; + }, + [](const block_state_ptr& bsp) -> const block_signing_authority& { + static block_signing_authority bsa; return bsa; //return bsp->header.producer; [greg todo] + }}, + bsp); + } }; struct assembled_block { - block_id_type _id; - pending_block_header_state_legacy _pending_block_header_state_legacy; - deque _trx_metas; - signed_block_ptr _unsigned_block; + // -------------------------------------------------------------------------------- + struct assembled_block_dpos { + block_id_type id; + pending_block_header_state_legacy pending_block_header_state; + deque trx_metas; + signed_block_ptr unsigned_block; - // if the _unsigned_block pre-dates block-signing authorities this may be present. - std::optional _new_producer_authority_cache; -}; + // if the unsigned_block pre-dates block-signing authorities this may be present. 
+ std::optional new_producer_authority_cache; -struct completed_block { - block_state_legacy_ptr _block_state; -}; + }; -using block_stage_type = std::variant; + // -------------------------------------------------------------------------------- + struct assembled_block_if { + producer_authority active_producer_authority; + block_header_state bhs; + deque trx_metas; // Comes from building_block::pending_trx_metas + // Carried over to put into block_state (optimization for fork reorgs) + deque trx_receipts; // Comes from building_block::pending_trx_receipts + std::optional qc; // QC to add as block extension to new block + }; -struct pending_state { - pending_state( maybe_session&& s, const block_header_state_legacy& prev, - block_timestamp_type when, - bool hotstuff_activated, - uint16_t num_prev_blocks_to_confirm, - const vector& new_protocol_feature_activations ) - :_db_session( std::move(s) ) - ,_block_stage( building_block( prev, when, hotstuff_activated, num_prev_blocks_to_confirm, new_protocol_feature_activations ) ) - {} + std::variant v; - maybe_session _db_session; - block_stage_type _block_stage; - controller::block_status _block_status = controller::block_status::ephemeral; - std::optional _producer_block_id; - controller::block_report _block_report{}; + bool is_dpos() const { return std::holds_alternative(v); } - /** @pre _block_stage cannot hold completed_block alternative */ - const pending_block_header_state_legacy& get_pending_block_header_state_legacy()const { - if( std::holds_alternative(_block_stage) ) - return std::get(_block_stage)._pending_block_header_state_legacy; + template + R apply_dpos(F&& f) { + if constexpr (std::is_same_v) + std::visit(overloaded{[&](assembled_block_dpos& ab) { std::forward(f)(ab); }, + [&](assembled_block_if& ab) {}}, v); + else + return std::visit(overloaded{[&](assembled_block_dpos& ab) -> R { return std::forward(f)(ab); }, + [&](assembled_block_if& ab) -> R { return {}; }}, v); + } - return std::get(_block_stage)._pending_block_header_state_legacy; + template + R apply_hs(F&& f) { + if constexpr (std::is_same_v) + std::visit(overloaded{[&](assembled_block_dpos& ab) {}, + [&](assembled_block_if& ab) { std::forward(f)(ab); }}, v); + else + return std::visit(overloaded{[&](assembled_block_dpos& ab) -> R { return {}; }, + [&](assembled_block_if& ab) -> R { return std::forward(f)(ab); }}, v); } deque extract_trx_metas() { - if( std::holds_alternative(_block_stage) ) - return std::move( std::get(_block_stage)._pending_trx_metas ); + return std::visit([](auto& ab) { return std::move(ab.trx_metas); }, v); + } + + bool is_protocol_feature_activated(const digest_type& digest) const { + // Calling is_protocol_feature_activated during the assembled_block stage is not efficient. + // We should avoid doing it. + // In fact for now it isn't even implemented. 
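+      // (Callers that need this information can check it while the block is still in the
+      //  building_block stage, or wait until it reaches the completed_block stage; both of
+      //  those stages implement the lookup.)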
+ EOS_THROW( misc_exception, + "checking if protocol feature is activated in the assembled_block stage is not yet supported" ); + // TODO: implement this + } + + const block_id_type& id() const { + return std::visit( + overloaded{[](const assembled_block_dpos& ab) -> const block_id_type& { return ab.id; }, + [](const assembled_block_if& ab) -> const block_id_type& { return ab.bhs.id; }}, + v); + } + + block_timestamp_type timestamp() const { + return std::visit( + overloaded{[](const assembled_block_dpos& ab) { return ab.pending_block_header_state.timestamp; }, + [](const assembled_block_if& ab) { return ab.bhs.header.timestamp; }}, + v); + } + + uint32_t block_num() const { + return std::visit( + overloaded{[](const assembled_block_dpos& ab) { return ab.pending_block_header_state.block_num; }, + [](const assembled_block_if& ab) { return ab.bhs.block_num(); }}, + v); + } - if( std::holds_alternative(_block_stage) ) - return std::move( std::get(_block_stage)._trx_metas ); + account_name producer() const { + return std::visit( + overloaded{[](const assembled_block_dpos& ab) { return ab.pending_block_header_state.producer; }, + [](const assembled_block_if& ab) { return ab.active_producer_authority.producer_name; }}, + v); + } - return std::get(_block_stage)._block_state->extract_trxs_metas(); + const producer_authority_schedule& active_producers() const { + return std::visit(overloaded{[](const assembled_block_dpos& ab) -> const producer_authority_schedule& { + return ab.pending_block_header_state.active_schedule; + }, + [](const assembled_block_if& ab) -> const producer_authority_schedule& { + static producer_authority_schedule pas; return pas; // [greg todo] + }}, + v); } - bool is_protocol_feature_activated( const digest_type& feature_digest )const { - if( std::holds_alternative(_block_stage) ) { - auto& bb = std::get(_block_stage); - const auto& activated_features = bb._pending_block_header_state_legacy.prev_activated_protocol_features->protocol_features; + using opt_pas = const std::optional; - if( activated_features.find( feature_digest ) != activated_features.end() ) return true; + opt_pas& pending_producers() const { + return std::visit( + overloaded{[](const assembled_block_dpos& ab) -> opt_pas& { return ab.new_producer_authority_cache; }, + [](const assembled_block_if& ab) -> opt_pas& { + static opt_pas empty; + return empty; // [greg todo] + }}, + v); + } - if( bb._num_new_protocol_features_that_have_activated == 0 ) return false; + const block_signing_authority& pending_block_signing_authority() const { + return std::visit(overloaded{[](const assembled_block_dpos& ab) -> const block_signing_authority& { + return ab.pending_block_header_state.valid_block_signing_authority; + }, + [](const assembled_block_if& ab) -> const block_signing_authority& { + return ab.active_producer_authority.authority; + }}, + v); + } + + completed_block make_completed_block(const protocol_feature_set& pfs, validator_t validator, + const signer_callback_type& signer) { + return std::visit(overloaded{[&](assembled_block_dpos& ab) { + auto bsp = std::make_shared( + std::move(ab.pending_block_header_state), std::move(ab.unsigned_block), + std::move(ab.trx_metas), pfs, validator, signer); + + return completed_block{block_state_legacy_ptr{std::move(bsp)}}; + }, + [&](assembled_block_if& ab) { + return completed_block{}; /* [greg todo] */ + }}, + v); + } +}; - auto end = bb._new_protocol_feature_activations.begin() + bb._num_new_protocol_features_that_have_activated; - return (std::find( 
bb._new_protocol_feature_activations.begin(), end, feature_digest ) != end); +struct building_block { + // -------------------------------------------------------------------------------- + struct building_block_common { + using checksum_or_digests = std::variant; + const vector new_protocol_feature_activations; + size_t num_new_protocol_features_that_have_activated = 0; + deque pending_trx_metas; + deque pending_trx_receipts; + checksum_or_digests trx_mroot_or_receipt_digests {digests_t{}}; + digests_t action_receipt_digests; + + building_block_common(const vector& new_protocol_feature_activations) : + new_protocol_feature_activations(new_protocol_feature_activations) + {} + + bool is_protocol_feature_activated(const digest_type& digest, const flat_set& activated_features) const { + if (activated_features.find(digest) != activated_features.end()) + return true; + if (num_new_protocol_features_that_have_activated == 0) + return false; + auto end = new_protocol_feature_activations.begin() + num_new_protocol_features_that_have_activated; + return (std::find(new_protocol_feature_activations.begin(), end, digest) != end); + } + + std::function make_block_restore_point() { + auto orig_trx_receipts_size = pending_trx_receipts.size(); + auto orig_trx_metas_size = pending_trx_metas.size(); + auto orig_trx_receipt_digests_size = std::holds_alternative(trx_mroot_or_receipt_digests) ? + std::get(trx_mroot_or_receipt_digests).size() : 0; + auto orig_action_receipt_digests_size = action_receipt_digests.size(); + return [this, + orig_trx_receipts_size, + orig_trx_metas_size, + orig_trx_receipt_digests_size, + orig_action_receipt_digests_size]() + { + pending_trx_receipts.resize(orig_trx_receipts_size); + pending_trx_metas.resize(orig_trx_metas_size); + if (std::holds_alternative(trx_mroot_or_receipt_digests)) + std::get(trx_mroot_or_receipt_digests).resize(orig_trx_receipt_digests_size); + action_receipt_digests.resize(orig_action_receipt_digests_size); + }; + } + }; + + // -------------------------------------------------------------------------------- + struct building_block_dpos : public building_block_common { + pending_block_header_state_legacy pending_block_header_state; + std::optional new_pending_producer_schedule; + + building_block_dpos( const block_header_state_legacy& prev, + block_timestamp_type when, + uint16_t num_prev_blocks_to_confirm, + const vector& new_protocol_feature_activations) + : building_block_common(new_protocol_feature_activations), + pending_block_header_state(prev.next(when, num_prev_blocks_to_confirm)) + {} + + bool is_protocol_feature_activated(const digest_type& digest) const { + return building_block_common::is_protocol_feature_activated( + digest, pending_block_header_state.prev_activated_protocol_features->protocol_features); } - if( std::holds_alternative(_block_stage) ) { - // Calling is_protocol_feature_activated during the assembled_block stage is not efficient. - // We should avoid doing it. - // In fact for now it isn't even implemented. 
- EOS_THROW( misc_exception, - "checking if protocol feature is activated in the assembled_block stage is not yet supported" ); - // TODO: implement this + uint32_t get_block_num() const { return pending_block_header_state.block_num; } + }; + + // -------------------------------------------------------------------------------- + struct building_block_if : public building_block_common { + const block_id_type parent_id; // Comes from building_block_input::parent_id + const block_timestamp_type timestamp; // Comes from building_block_input::timestamp + const producer_authority active_producer_authority; // Comes from parent.get_scheduled_producer(timestamp) + const vector new_protocol_feature_activations; // Comes from building_block_input::new_protocol_feature_activations + const protocol_feature_activation_set_ptr prev_activated_protocol_features; // Cached: parent.activated_protocol_features() + const proposer_policy_ptr active_proposer_policy; // Cached: parent.get_next_active_proposer_policy(timestamp) + const uint32_t block_num; // Cached: parent.block_num() + 1 + + // Members below (as well as non-const members of building_block_common) start from initial state and are mutated as the block is built. + std::optional new_proposer_policy; + std::optional new_finalizer_policy; + + building_block_if(const block_header_state& parent, const building_block_input& input) + : building_block_common(input.new_protocol_feature_activations) + , parent_id(input.parent_id) + , timestamp(input.timestamp) + , active_producer_authority{input.producer, + [&]() -> block_signing_authority { + const auto& pas = parent.proposer_policy->proposer_schedule; + for (const auto& pa : pas.producers) + if (pa.producer_name == input.producer) + return pa.authority; + assert(0); // we should find the authority + return {}; + }()} + , prev_activated_protocol_features(parent.activated_protocol_features) + , active_proposer_policy(parent.proposer_policy) + , block_num(parent.block_num() + 1) {} + + bool is_protocol_feature_activated(const digest_type& digest) const { + return building_block_common::is_protocol_feature_activated(digest, prev_activated_protocol_features->protocol_features); } - const auto& activated_features = std::get(_block_stage)._block_state->activated_protocol_features->protocol_features; - return (activated_features.find( feature_digest ) != activated_features.end()); + uint32_t get_block_num() const { return block_num; } + + }; + + std::variant v; + + // dpos constructor + building_block(const block_header_state_legacy& prev, block_timestamp_type when, uint16_t num_prev_blocks_to_confirm, + const vector& new_protocol_feature_activations) : + v(building_block_dpos(prev, when, num_prev_blocks_to_confirm, new_protocol_feature_activations)) + {} + + bool is_dpos() const { return std::holds_alternative(v); } + + // if constructor + building_block(const block_header_state& prev, const building_block_input& bbi) : + v(building_block_if(prev, bbi)) + {} + + template + R apply_dpos(F&& f) { + if constexpr (std::is_same_v) + std::visit(overloaded{[&](building_block_dpos& bb) { std::forward(f)(bb); }, + [&](building_block_if& bb) {}}, v); + else + return std::visit(overloaded{[&](building_block_dpos& bb) -> R { return std::forward(f)(bb); }, + [&](building_block_if& bb) -> R { return {}; }}, v); + } + + template + R apply_hs(F&& f) { + if constexpr (std::is_same_v) + std::visit(overloaded{[&](building_block_dpos& bb) {}, + [&](building_block_if& bb) { std::forward(f)(bb); }}, v); + else + return 
std::visit(overloaded{[&](building_block_dpos& bb) -> R { return {}; }, + [&](building_block_if& bb) -> R { return std::forward(f)(bb); }}, v); + } + + deque extract_trx_metas() { + return std::visit([](auto& bb) { return std::move(bb.pending_trx_metas); }, v); + } + + bool is_protocol_feature_activated(const digest_type& digest) const { + return std::visit([&digest](const auto& bb) { return bb.is_protocol_feature_activated(digest); }, v); + } + + std::function make_block_restore_point() { + return std::visit([](auto& bb) { return bb.make_block_restore_point(); }, v); + } + + uint32_t block_num() const { + return std::visit([](const auto& bb) { return bb.get_block_num(); }, v); + } + + block_timestamp_type timestamp() const { + return std::visit( + overloaded{[](const building_block_dpos& bb) { return bb.pending_block_header_state.timestamp; }, + [](const building_block_if& bb) { return bb.timestamp; }}, + v); + } + + account_name producer() const { + return std::visit( + overloaded{[](const building_block_dpos& bb) { return bb.pending_block_header_state.producer; }, + [](const building_block_if& bb) { return bb.active_producer_authority.producer_name; }}, + v); + } + + const vector& new_protocol_feature_activations() { + return std::visit([](auto& bb) -> const vector& { return bb.new_protocol_feature_activations; }, v); + } + + const block_signing_authority& pending_block_signing_authority() const { + return std::visit(overloaded{[](const building_block_dpos& bb) -> const block_signing_authority& { + return bb.pending_block_header_state.valid_block_signing_authority; + }, + [](const building_block_if& bb) -> const block_signing_authority& { + return bb.active_producer_authority.authority; + }}, + v); + } + + size_t& num_new_protocol_features_activated() { + return std::visit([](auto& bb) -> size_t& { return bb.num_new_protocol_features_that_have_activated; }, v); + } + + deque& pending_trx_metas() { + return std::visit([](auto& bb) -> deque& { return bb.pending_trx_metas; }, v); + } + + deque& pending_trx_receipts() { + return std::visit([](auto& bb) -> deque& { return bb.pending_trx_receipts; }, v); + } + + building_block_common::checksum_or_digests& trx_mroot_or_receipt_digests() { + return std::visit( + [](auto& bb) -> building_block_common::checksum_or_digests& { return bb.trx_mroot_or_receipt_digests; }, v); + } + + digests_t& action_receipt_digests() { + return std::visit([](auto& bb) -> digests_t& { return bb.action_receipt_digests; }, v); + } + + const producer_authority_schedule& active_producers() const { + return std::visit(overloaded{[](const building_block_dpos& bb) -> const producer_authority_schedule& { + return bb.pending_block_header_state.active_schedule; + }, + [](const building_block_if& bb) -> const producer_authority_schedule& { + return bb.active_proposer_policy->proposer_schedule; + }}, + v); + } + + const producer_authority_schedule& pending_producers() const { + return std::visit(overloaded{[](const building_block_dpos& bb) -> const producer_authority_schedule& { + if (bb.new_pending_producer_schedule) + return *bb.new_pending_producer_schedule; + return bb.pending_block_header_state.prev_pending_schedule.schedule; + }, + [](const building_block_if& bb) -> const producer_authority_schedule& { + static producer_authority_schedule empty; + return empty; // [greg todo] + }}, + v); + } +}; + + +using block_stage_type = std::variant; + +struct pending_state { + pending_state( maybe_session&& s, + const block_header_state_legacy& prev, + block_timestamp_type when, + 
uint16_t num_prev_blocks_to_confirm, + const vector& new_protocol_feature_activations ) + :_db_session( std::move(s) ) + ,_block_stage( building_block( prev, when, num_prev_blocks_to_confirm, new_protocol_feature_activations ) ) + {} + + maybe_session _db_session; + block_stage_type _block_stage; + controller::block_status _block_status = controller::block_status::ephemeral; + std::optional _producer_block_id; + controller::block_report _block_report{}; + + deque extract_trx_metas() { + return std::visit([](auto& stage) { return stage.extract_trx_metas(); }, _block_stage); + } + + bool is_protocol_feature_activated(const digest_type& digest) const { + return std::visit([&](const auto& stage) { return stage.is_protocol_feature_activated(digest); }, _block_stage); + } + + block_timestamp_type timestamp() const { + return std::visit([](const auto& stage) { return stage.timestamp(); }, _block_stage); + } + + uint32_t block_num() const { + return std::visit([](const auto& stage) { return stage.block_num(); }, _block_stage); + } + + account_name producer() const { + return std::visit([](const auto& stage) { return stage.producer(); }, _block_stage); } void push() { _db_session.push(); } + + bool is_dpos() const { return std::visit([](const auto& stage) { return stage.is_dpos(); }, _block_stage); } + + const block_signing_authority& pending_block_signing_authority() const { + return std::visit( + [](const auto& stage) -> const block_signing_authority& { return stage.pending_block_signing_authority(); }, + _block_stage); + } + + const producer_authority_schedule& active_producers() const { + return std::visit( + [](const auto& stage) -> const producer_authority_schedule& { return stage.active_producers(); }, + _block_stage); + } + +#if 0 + // [greg todo] maybe we don't need this and we can have the implementation in controller::pending_producers() + const producer_authority_schedule& pending_producers() const { + return std::visit( + overloaded{ + [](const building_block& bb) -> const producer_authority_schedule& { return bb.pending_producers(); }, + [](const assembled_block& ab) -> const producer_authority_schedule& { return ab.pending_producers(); }, + [](const completed_block& cb) -> const producer_authority_schedule& { return cb.pending_producers(); }}, + _block_stage); + } +#endif }; struct controller_impl { @@ -232,14 +613,166 @@ struct controller_impl { reset_new_handler() { std::set_new_handler([](){ throw std::bad_alloc(); }); } }; + template + struct block_data_gen_t { + public: + using bs = bsp::element_type; + using bhs = bhsp::element_type; + using fork_db_t = fork_database; + + bsp head; + fork_db_t fork_db; + + block_data_gen_t(const std::filesystem::path& path) : fork_db(path) {} + + bsp fork_db_head(bool irreversible_mode) const { + if (irreversible_mode) { + // When in IRREVERSIBLE mode fork_db blocks are marked valid when they become irreversible so that + // fork_db.head() returns irreversible block + // Use pending_head since this method should return the chain head and not last irreversible. 
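+            // (pending_head() returns the tip of the best known branch even though those blocks are
+            //  not yet marked valid, which is what callers expect as the chain head in this mode.)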
+ return fork_db.pending_head(); + } else { + return fork_db.head(); + } + } + + bsp fork_db_root() const { return fork_db.root(); } + + bsp fork_db_head() const { return fork_db.head(); } + + void fork_db_open(validator_t& validator) { return fork_db.open(validator); } + + void fork_db_reset_to_head() { return fork_db.reset(*head); } + + template + R apply(F &f) { if constexpr (std::is_same_v) f(fork_db, head); else return f(fork_db, head); } + + uint32_t pop_block() { + auto prev = fork_db.get_block( head->previous() ); + + if( !prev ) { + EOS_ASSERT( fork_db.root()->id() == head->previous(), block_validate_exception, "attempt to pop beyond last irreversible block" ); + prev = fork_db.root(); + } + + EOS_ASSERT( head->block, block_validate_exception, "attempting to pop a block that was sparsely loaded from a snapshot"); + head = prev; + + return prev->block_num(); + } + }; + + using block_data_legacy_t = block_data_gen_t; + using block_data_new_t = block_data_gen_t; + + + struct block_data_t { + using block_data_variant = std::variant; + + block_data_variant v; + + uint32_t head_block_num() const { return std::visit([](const auto& bd) { return bd.head->block_num(); }, v); } + block_timestamp_type head_block_time() const { return std::visit([](const auto& bd) { return bd.head->timestamp(); }, v); } + account_name head_block_producer() const { return std::visit([](const auto& bd) { return bd.head->producer(); }, v); } + + protocol_feature_activation_set_ptr head_activated_protocol_features() const { return std::visit([](const auto& bd) { + return bd.head->get_activated_protocol_features(); }, v); + } + + const producer_authority_schedule& head_active_schedule_auth() { + return std::visit([](const auto& bd) -> const producer_authority_schedule& { return bd.head->active_schedule_auth(); }, v); + } + + const producer_authority_schedule& head_pending_schedule_auth() { + return std::visit([](const auto& bd) -> const producer_authority_schedule& { return bd.head->pending_schedule_auth(); }, v); + } + + const block_id_type& head_block_id() const { + return std::visit([](const auto& bd) -> const block_id_type& { return bd.head->id(); }, v); + } + + const block_header& head_block_header() const { + return std::visit([](const auto& bd) -> const block_header& { return bd.head->header; }, v); + } + + const signed_block_ptr& head_block() const { + return std::visit([](const auto& bd) -> const signed_block_ptr& { return bd.head->block; }, v); + } + + // --------------- access fork_db head ---------------------------------------------------------------------- + bool fork_db_has_head() const { + return std::visit([&](const auto& bd) { return !!bd.fork_db_head(); }, v); + } + + uint32_t fork_db_head_block_num(bool irreversible_mode) const { + return std::visit([&](const auto& bd) { return bd.fork_db_head(irreversible_mode)->block_num(); }, v); + } + + const block_id_type& fork_db_head_block_id(bool irreversible_mode) const { + return std::visit([&](const auto& bd) -> const block_id_type& { return bd.fork_db_head(irreversible_mode)->id(); }, v); + } + + uint32_t fork_db_head_irreversible_blocknum(bool irreversible_mode) const { + return std::visit([&](const auto& bd) { return bd.fork_db_head(irreversible_mode)->irreversible_blocknum(); }, v); + } + + // --------------- access fork_db root ---------------------------------------------------------------------- + bool fork_db_has_root() const { + return std::visit([&](const auto& bd) { return !!bd.fork_db_root(); }, v); + } + + const block_id_type& 
fork_db_root_block_id() const { + return std::visit([&](const auto& bd) -> const block_id_type& { return bd.fork_db_root()->id(); }, v); + } + + uint32_t fork_db_root_block_num() const { + return std::visit([&](const auto& bd) { return bd.fork_db_root()->block_num(); }, v); + } + + block_timestamp_type fork_db_root_timestamp() const { + return std::visit([&](const auto& bd) { return bd.fork_db_root()->timestamp(); }, v); + } + + // --------------- fork_db APIs ---------------------------------------------------------------------- + uint32_t pop_block() { return std::visit([](auto& bd) { return bd.pop_block(); }, v); } + + void fork_db_open(validator_t& validator) { return std::visit([&](auto& bd) { bd.fork_db_open(validator); }, v); } + + void fork_db_reset_to_head() { return std::visit([&](auto& bd) { bd.fork_db_reset_to_head(); }, v); } + + signed_block_ptr fork_db_fetch_block_by_id( const block_id_type& id ) const { + return std::visit([&](const auto& bd) -> signed_block_ptr { + auto bsp = bd.fork_db.get_block(id); + return bsp ? bsp->block : nullptr; + }, v); + } + + template + R apply(F &f) { + if constexpr (std::is_same_v) + std::visit([&](auto& bd) { bd.template apply(f); }, v); + else + return std::visit([&](auto& bd) -> R { return bd.template apply(f); }, v); + } + + template + R apply_dpos(F& f) { + if constexpr (std::is_same_v) + std::visit(overloaded{[&](block_data_legacy_t& bd) { bd.template apply(f); }, + [&](block_data_new_t& bd) {}}, v); + else + return std::visit(overloaded{[&](block_data_legacy_t& bd) -> R { return bd.template apply(f); }, + [&](block_data_new_t& bd) -> R { return {}; }}, v); + } + }; + reset_new_handler rnh; // placed here to allow for this to be set before constructing the other fields controller& self; std::function shutdown; chainbase::database db; block_log blog; std::optional pending; - block_state_legacy_ptr head; - fork_database fork_db; + block_data_t block_data; // includes `head` aand `fork_db` std::optional pacemaker; std::atomic hs_irreversible_block_num{0}; resource_limits_manager resource_limits; @@ -271,21 +804,11 @@ struct controller_impl { map< account_name, map > apply_handlers; unordered_map< builtin_protocol_feature_t, std::function, enum_hash > protocol_feature_activation_handlers; - void pop_block() { - auto prev = fork_db.get_block( head->header.previous ); - - if( !prev ) { - EOS_ASSERT( fork_db.root()->id == head->header.previous, block_validate_exception, "attempt to pop beyond last irreversible block" ); - prev = fork_db.root(); - } - - EOS_ASSERT( head->block, block_validate_exception, "attempting to pop a block that was sparsely loaded from a snapshot"); - - head = prev; + void pop_block() { + uint32_t prev_block_num = block_data.pop_block(); db.undo(); - - protocol_features.popped_blocks_to( prev->block_num ); + protocol_features.popped_blocks_to(prev_block_num); } template @@ -314,7 +837,9 @@ struct controller_impl { cfg.read_only ? 
database::read_only : database::read_write, cfg.state_size, false, cfg.db_map_mode ), blog( cfg.blocks_dir, cfg.blog ), - fork_db( cfg.blocks_dir / config::reversible_blocks_dir_name ), + block_data(block_data_t::block_data_variant{ + std::in_place_type, // [greg todo] create correct type depending on whether IF activated + std::filesystem::path{cfg.blocks_dir / config::reversible_blocks_dir_name}}), resource_limits( db, [&s](bool is_trx_transient) { return s.get_deep_mind_logger(is_trx_transient); }), authorization( s, db ), protocol_features( std::move(pfs), [&s](bool is_trx_transient) { return s.get_deep_mind_logger(is_trx_transient); } ), @@ -324,11 +849,10 @@ struct controller_impl { thread_pool(), wasmif( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty() ) { - fork_db.open( [this]( block_timestamp_type timestamp, - const flat_set& cur_features, - const vector& new_features ) - { check_protocol_features( timestamp, cur_features, new_features ); } - ); + block_data.fork_db_open([this](block_timestamp_type timestamp, const flat_set& cur_features, + const vector& new_features) { + check_protocol_features(timestamp, cur_features, new_features); + }); thread_pool.start( cfg.thread_pool_size, [this]( const fc::exception& e ) { elog( "Exception in chain thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); @@ -414,72 +938,74 @@ struct controller_impl { } void log_irreversible() { - EOS_ASSERT( fork_db.root(), fork_database_exception, "fork database not properly initialized" ); + EOS_ASSERT( fork_db_has_root(), fork_database_exception, "fork database not properly initialized" ); const std::optional log_head_id = blog.head_id(); const bool valid_log_head = !!log_head_id; const auto lib_num = valid_log_head ? block_header::num_from_id(*log_head_id) : (blog.first_block_num() - 1); - auto root_id = fork_db.root()->id; + auto root_id = fork_db_root_block_id(); if( valid_log_head ) { EOS_ASSERT( root_id == log_head_id, fork_database_exception, "fork database root does not match block log head" ); } else { - EOS_ASSERT( fork_db.root()->block_num == lib_num, fork_database_exception, + EOS_ASSERT( fork_db_root_block_num() == lib_num, fork_database_exception, "The first block ${lib_num} when starting with an empty block log should be the block after fork database root ${bn}.", - ("lib_num", lib_num)("bn", fork_db.root()->block_num) ); + ("lib_num", lib_num)("bn", fork_db_root_block_num()) ); } - const auto fork_head = fork_db_head(); const uint32_t hs_lib = hs_irreversible_block_num; - const uint32_t new_lib = hs_lib > 0 ? hs_lib : fork_head->dpos_irreversible_blocknum; + const uint32_t new_lib = hs_lib > 0 ? 
hs_lib : fork_db_head_irreversible_blocknum(); if( new_lib <= lib_num ) return; - auto branch = fork_db.fetch_branch( fork_head->id, new_lib ); - try { - - std::vector>> v; - v.reserve( branch.size() ); - for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { - v.emplace_back( post_async_task( thread_pool.get_executor(), [b=(*bitr)->block]() { return fc::raw::pack(*b); } ) ); - } - auto it = v.begin(); - - for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { - if( read_mode == db_read_mode::IRREVERSIBLE ) { - controller::block_report br; - apply_block( br, *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); + auto mark_branch_irreversible = [&](auto& fork_db, auto& head) { + auto branch = fork_db.fetch_branch( fork_db_head_block_id(), new_lib ); + try { + std::vector>> v; + v.reserve( branch.size() ); + for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { + v.emplace_back( post_async_task( thread_pool.get_executor(), [b=(*bitr)->block]() { return fc::raw::pack(*b); } ) ); } + auto it = v.begin(); - emit( self.irreversible_block, std::tie((*bitr)->block, (*bitr)->id) ); + for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { + if( read_mode == db_read_mode::IRREVERSIBLE ) { + controller::block_report br; + apply_block( br, *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); + } - // blog.append could fail due to failures like running out of space. - // Do it before commit so that in case it throws, DB can be rolled back. - blog.append( (*bitr)->block, (*bitr)->id, it->get() ); - ++it; + emit( self.irreversible_block, std::tie((*bitr)->block, (*bitr)->id()) ); - db.commit( (*bitr)->block_num ); - root_id = (*bitr)->id; + // blog.append could fail due to failures like running out of space. + // Do it before commit so that in case it throws, DB can be rolled back. 
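+               // (The packed payload consumed here via `it` was serialized in parallel on the
+               //  thread pool above, in the same order the branch is iterated.)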
+ blog.append( (*bitr)->block, (*bitr)->id(), it->get() ); + ++it; + + db.commit( (*bitr)->block_num() ); + root_id = (*bitr)->id(); + } + } catch( std::exception& ) { + if( root_id != fork_db.root()->id() ) { + fork_db.advance_root( root_id ); + } + throw; } - } catch( std::exception& ) { - if( root_id != fork_db.root()->id ) { + + //db.commit( new_lib ); // redundant + + if( root_id != fork_db.root()->id() ) { + branch.emplace_back(fork_db.root()); fork_db.advance_root( root_id ); } - throw; - } - - //db.commit( new_lib ); // redundant - if( root_id != fork_db.root()->id ) { - branch.emplace_back(fork_db.root()); - fork_db.advance_root( root_id ); - } + // delete branch in thread pool + boost::asio::post( thread_pool.get_executor(), [branch{std::move(branch)}]() {} ); + }; - // delete branch in thread pool - boost::asio::post( thread_pool.get_executor(), [branch{std::move(branch)}]() {} ); + block_data.apply(mark_branch_irreversible); } /** @@ -487,107 +1013,117 @@ struct controller_impl { */ void initialize_blockchain_state(const genesis_state& genesis) { wlog( "Initializing new blockchain with genesis state" ); - producer_authority_schedule initial_schedule = { 0, { producer_authority{config::system_account_name, block_signing_authority_v0{ 1, {{genesis.initial_key, 1}} } } } }; - legacy::producer_schedule_type initial_legacy_schedule{ 0, {{config::system_account_name, genesis.initial_key}} }; - - block_header_state_legacy genheader; - genheader.active_schedule = initial_schedule; - genheader.pending_schedule.schedule = initial_schedule; - // NOTE: if wtmsig block signatures are enabled at genesis time this should be the hash of a producer authority schedule - genheader.pending_schedule.schedule_hash = fc::sha256::hash(initial_legacy_schedule); - genheader.header.timestamp = genesis.initial_timestamp; - genheader.header.action_mroot = genesis.compute_chain_id(); - genheader.id = genheader.header.calculate_id(); - genheader.block_num = genheader.header.block_num(); - - head = std::make_shared(); - static_cast(*head) = genheader; - head->activated_protocol_features = std::make_shared(); - head->block = std::make_shared(genheader.header); - db.set_revision( head->block_num ); + + auto init_blockchain = [&genesis](auto& fork_db, auto& head) { + producer_authority_schedule initial_schedule = { 0, { producer_authority{config::system_account_name, block_signing_authority_v0{ 1, {{genesis.initial_key, 1}} } } } }; + legacy::producer_schedule_type initial_legacy_schedule{ 0, {{config::system_account_name, genesis.initial_key}} }; + + block_header_state_legacy genheader; + genheader.active_schedule = initial_schedule; + genheader.pending_schedule.schedule = initial_schedule; + // NOTE: if wtmsig block signatures are enabled at genesis time this should be the hash of a producer authority schedule + genheader.pending_schedule.schedule_hash = fc::sha256::hash(initial_legacy_schedule); + genheader.header.timestamp = genesis.initial_timestamp; + genheader.header.action_mroot = genesis.compute_chain_id(); + genheader.id = genheader.header.calculate_id(); + genheader.block_num = genheader.header.block_num(); + + head = std::make_shared(); + static_cast(*head) = genheader; + head->activated_protocol_features = std::make_shared(); + head->block = std::make_shared(genheader.header); + }; + + block_data.apply_dpos(init_blockchain); // assuming here that genesis_state is always dpos + + db.set_revision( head_block_num() ); initialize_database(genesis); } void replay(std::function check_shutdown) { auto 
blog_head = blog.head(); - if( !fork_db.root() ) { - fork_db.reset( *head ); + if( !fork_db_has_root() ) { + block_data.fork_db_reset_to_head(); if (!blog_head) return; } replaying = true; - auto start_block_num = head->block_num + 1; + auto start_block_num = head_block_num() + 1; auto start = fc::time_point::now(); std::exception_ptr except_ptr; - if( blog_head && start_block_num <= blog_head->block_num() ) { - ilog( "existing block log, attempting to replay from ${s} to ${n} blocks", - ("s", start_block_num)("n", blog_head->block_num()) ); - try { - while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { - replay_push_block( next, controller::block_status::irreversible ); - if( check_shutdown() ) break; - if( next->block_num() % 500 == 0 ) { - ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) ); + auto replay_blog = [&](auto& fork_db, auto& head) { + if( blog_head && start_block_num <= blog_head->block_num() ) { + ilog( "existing block log, attempting to replay from ${s} to ${n} blocks", + ("s", start_block_num)("n", blog_head->block_num()) ); + try { + while( auto next = blog.read_block_by_num( head->block_num() + 1 ) ) { + replay_push_block( next, controller::block_status::irreversible ); + if( check_shutdown() ) break; + if( next->block_num() % 500 == 0 ) { + ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) ); + } } + } catch( const database_guard_exception& e ) { + except_ptr = std::current_exception(); } - } catch( const database_guard_exception& e ) { - except_ptr = std::current_exception(); + ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num() - start_block_num) ); + + auto pending_head = fork_db.pending_head(); + if( pending_head ) { + ilog( "fork database head ${h}, root ${r}", ("h", pending_head->block_num())( "r", fork_db.root()->block_num() ) ); + if( pending_head->block_num() < head->block_num() || head->block_num() < fork_db.root()->block_num() ) { + ilog( "resetting fork database with new last irreversible block as the new root: ${id}", ("id", head->id()) ); + fork_db.reset( *head ); + } else if( head->block_num() != fork_db.root()->block_num() ) { + auto new_root = fork_db.search_on_branch( pending_head->id(), head->block_num() ); + EOS_ASSERT( new_root, fork_database_exception, + "unexpected error: could not find new LIB in fork database" ); + ilog( "advancing fork database root to new last irreversible block within existing fork database: ${id}", + ("id", new_root->id()) ); + fork_db.mark_valid( new_root ); + fork_db.advance_root( new_root->id() ); + } + } + + // if the irreverible log is played without undo sessions enabled, we need to sync the + // revision ordinal to the appropriate expected value here. 
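+         // (The expected value here is simply the replayed head block number, since no undo
+         //  sessions were created while applying these irreversible blocks.)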
+ if( self.skip_db_sessions( controller::block_status::irreversible ) ) + db.set_revision( head->block_num() ); + } else { + ilog( "no irreversible blocks need to be replayed" ); } - ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) ); - - auto pending_head = fork_db.pending_head(); - if( pending_head ) { - ilog( "fork database head ${h}, root ${r}", ("h", pending_head->block_num)( "r", fork_db.root()->block_num ) ); - if( pending_head->block_num < head->block_num || head->block_num < fork_db.root()->block_num ) { - ilog( "resetting fork database with new last irreversible block as the new root: ${id}", ("id", head->id) ); - fork_db.reset( *head ); - } else if( head->block_num != fork_db.root()->block_num ) { - auto new_root = fork_db.search_on_branch( pending_head->id, head->block_num ); - EOS_ASSERT( new_root, fork_database_exception, - "unexpected error: could not find new LIB in fork database" ); - ilog( "advancing fork database root to new last irreversible block within existing fork database: ${id}", - ("id", new_root->id) ); - fork_db.mark_valid( new_root ); - fork_db.advance_root( new_root->id ); + + if (snapshot_head_block != 0 && !blog_head) { + // loading from snapshot without a block log so fork_db can't be considered valid + fork_db.reset( *head ); + } else if( !except_ptr && !check_shutdown() && fork_db.head() ) { + auto head_block_num = head->block_num(); + auto branch = fork_db.fetch_branch( fork_db.head()->id() ); + int rev = 0; + for( auto i = branch.rbegin(); i != branch.rend(); ++i ) { + if( check_shutdown() ) break; + if( (*i)->block_num() <= head_block_num ) continue; + ++rev; + replay_push_block( (*i)->block, controller::block_status::validated ); } + ilog( "${n} reversible blocks replayed", ("n",rev) ); } - // if the irreverible log is played without undo sessions enabled, we need to sync the - // revision ordinal to the appropriate expected value here. 
- if( self.skip_db_sessions( controller::block_status::irreversible ) ) - db.set_revision( head->block_num ); - } else { - ilog( "no irreversible blocks need to be replayed" ); - } - - if (snapshot_head_block != 0 && !blog_head) { - // loading from snapshot without a block log so fork_db can't be considered valid - fork_db.reset( *head ); - } else if( !except_ptr && !check_shutdown() && fork_db.head() ) { - auto head_block_num = head->block_num; - auto branch = fork_db.fetch_branch( fork_db.head()->id ); - int rev = 0; - for( auto i = branch.rbegin(); i != branch.rend(); ++i ) { - if( check_shutdown() ) break; - if( (*i)->block_num <= head_block_num ) continue; - ++rev; - replay_push_block( (*i)->block, controller::block_status::validated ); + if( !fork_db.head() ) { + fork_db.reset( *head ); } - ilog( "${n} reversible blocks replayed", ("n",rev) ); - } - if( !fork_db.head() ) { - fork_db.reset( *head ); - } + auto end = fc::time_point::now(); + ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", + ("n", head->block_num() + 1 - start_block_num)("duration", (end-start).count()/1000000) + ("mspb", ((end-start).count()/1000.0)/(head->block_num()-start_block_num)) ); + replaying = false; + }; - auto end = fc::time_point::now(); - ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", - ("n", head->block_num + 1 - start_block_num)("duration", (end-start).count()/1000000) - ("mspb", ((end-start).count()/1000.0)/(head->block_num-start_block_num)) ); - replaying = false; + block_data.apply(replay_blog); if( except_ptr ) { std::rethrow_exception( except_ptr ); @@ -607,12 +1143,12 @@ struct controller_impl { } else { ilog( "Starting initialization from snapshot and no block log, this may take a significant amount of time" ); read_from_snapshot( snapshot, 0, std::numeric_limits::max() ); - EOS_ASSERT( head->block_num > 0, snapshot_exception, + EOS_ASSERT( head_block_num() > 0, snapshot_exception, "Snapshot indicates controller head at block number 0, but that is not allowed. " "Snapshot is invalid." ); - blog.reset( chain_id, head->block_num + 1 ); + blog.reset( chain_id, head_block_num() + 1 ); } - ilog( "Snapshot loaded, lib: ${lib}", ("lib", head->block_num) ); + ilog( "Snapshot loaded, lib: ${lib}", ("lib", head_block_num()) ); init(std::move(check_shutdown)); auto snapshot_load_time = (fc::time_point::now() - snapshot_load_start_time).to_seconds(); @@ -632,36 +1168,41 @@ struct controller_impl { ); this->shutdown = std::move(shutdown); - if( fork_db.head() ) { - if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { - fork_db.rollback_head_to_root(); + + auto do_startup = [&](auto& fork_db, auto& head) { + if( fork_db.head() ) { + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id() != fork_db.root()->id() ) { + fork_db.rollback_head_to_root(); + } + wlog( "No existing chain state. Initializing fresh blockchain state." ); + } else { + wlog( "No existing chain state or fork database. Initializing fresh blockchain state and resetting fork database."); } - wlog( "No existing chain state. Initializing fresh blockchain state." ); - } else { - wlog( "No existing chain state or fork database. 
Initializing fresh blockchain state and resetting fork database."); - } - initialize_blockchain_state(genesis); // sets head to genesis state + initialize_blockchain_state(genesis); // sets head to genesis state - if( !fork_db.head() ) { - fork_db.reset( *head ); - } + if( !fork_db.head() ) { + fork_db.reset( *head ); + } + }; + + block_data.apply(do_startup); if( blog.head() ) { EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, "block log does not start with genesis block" ); } else { - blog.reset( genesis, head->block ); + blog.reset( genesis, head_block() ); } init(std::move(check_shutdown)); } void startup(std::function shutdown, std::function check_shutdown) { EOS_ASSERT( db.revision() >= 1, database_exception, "This version of controller::startup does not work with a fresh state database." ); - EOS_ASSERT( fork_db.head(), fork_database_exception, "No existing fork database despite existing chain state. Replay required." ); + EOS_ASSERT( block_data.fork_db_has_head(), fork_database_exception, "No existing fork database despite existing chain state. Replay required." ); this->shutdown = std::move(shutdown); - uint32_t lib_num = fork_db.root()->block_num; + uint32_t lib_num = fork_db_root_block_num(); auto first_block_num = blog.first_block_num(); if( auto blog_head = blog.head() ) { EOS_ASSERT( first_block_num <= lib_num && lib_num <= blog_head->block_num(), @@ -678,10 +1219,14 @@ struct controller_impl { } } - if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { - fork_db.rollback_head_to_root(); - } - head = fork_db.head(); + auto do_startup = [&](auto& fork_db, auto& head) { + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id() != fork_db.root()->id() ) { + fork_db.rollback_head_to_root(); + } + head = fork_db.head(); + }; + + block_data.apply(do_startup); init(std::move(check_shutdown)); } @@ -719,16 +1264,16 @@ struct controller_impl { } // At this point head != nullptr - EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, + EOS_ASSERT( db.revision() >= head_block_num(), fork_database_exception, "fork database head (${head}) is inconsistent with state (${db})", - ("db",db.revision())("head",head->block_num) ); + ("db",db.revision())("head",head_block_num()) ); - if( db.revision() > head->block_num ) { + if( db.revision() > head_block_num() ) { wlog( "database revision (${db}) is greater than head block number (${head}), " "attempting to undo pending changes", - ("db",db.revision())("head",head->block_num) ); + ("db",db.revision())("head",head_block_num()) ); } - while( db.revision() > head->block_num ) { + while( db.revision() > head_block_num() ) { db.undo(); } @@ -736,7 +1281,7 @@ struct controller_impl { // At startup, no transaction specific logging is possible if (auto dm_logger = get_deep_mind_logger(false)) { - dm_logger->on_startup(db, head->block_num); + dm_logger->on_startup(db, head_block_num()); } if( conf.integrity_hash_on_start ) @@ -748,24 +1293,28 @@ struct controller_impl { if( check_shutdown() ) return; // At this point head != nullptr && fork_db.head() != nullptr && fork_db.root() != nullptr. - // Furthermore, fork_db.root()->block_num <= lib_num. + // Furthermore, fork_db.root()->block_num() <= lib_num. // Also, even though blog.head() may still be nullptr, blog.first_block_num() is guaranteed to be lib_num + 1. 
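      // (The block below covers the case where the node previously ran in irreversible mode and its
      //  fork database head is still at the root: the best branch from the fork database is applied
      //  now that a speculative or head read mode is in effect.)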
- if( read_mode != db_read_mode::IRREVERSIBLE - && fork_db.pending_head()->id != fork_db.head()->id - && fork_db.head()->id == fork_db.root()->id - ) { - wlog( "read_mode has changed from irreversible: applying best branch from fork database" ); - - for( auto pending_head = fork_db.pending_head(); - pending_head->id != fork_db.head()->id; - pending_head = fork_db.pending_head() - ) { - wlog( "applying branch from fork database ending with block: ${id}", ("id", pending_head->id) ); - controller::block_report br; - maybe_switch_forks( br, pending_head, controller::block_status::complete, forked_branch_callback{}, trx_meta_cache_lookup{} ); + auto finish_init = [&](auto& fork_db, auto& head) { + if( read_mode != db_read_mode::IRREVERSIBLE + && fork_db.pending_head()->id() != fork_db.head()->id() + && fork_db.head()->id() == fork_db.root()->id() + ) { + wlog( "read_mode has changed from irreversible: applying best branch from fork database" ); + + for( auto pending_head = fork_db.pending_head(); + pending_head->id() != fork_db.head()->id(); + pending_head = fork_db.pending_head() + ) { + wlog( "applying branch from fork database ending with block: ${id}", ("id", pending_head->id()) ); + controller::block_report br; + maybe_switch_forks( br, pending_head, controller::block_status::complete, forked_branch_callback{}, trx_meta_cache_lookup{} ); + } } - } + }; + + block_data.apply(finish_init); } ~controller_impl() { @@ -857,10 +1406,14 @@ struct controller_impl { section.add_row(chain_snapshot_header(), db); }); - snapshot->write_section("eosio::chain::block_state", [this]( auto §ion ){ - section.template add_row(*head, db); - }); - + // [greg todo] add snapshot support for new (IF) block_state section + auto write_block_state_section = [&](auto& fork_db, auto& head) { + snapshot->write_section("eosio::chain::block_state", [&]( auto §ion ) { + section.template add_row(*head, db); + }); + }; + block_data.apply_dpos(write_block_state_section); + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ using value_t = typename decltype(utils)::index_t::value_type; @@ -907,7 +1460,8 @@ struct controller_impl { header.validate(); }); - { /// load and upgrade the block header state + // [greg todo] add snapshot support for new (IF) block_state section + auto read_block_state_section = [&](auto& fork_db, auto& head) { /// load and upgrade the block header state block_header_state_legacy head_header_state; using v2 = legacy::snapshot_block_header_state_v2; @@ -934,7 +1488,8 @@ struct controller_impl { head = std::make_shared(); static_cast(*head) = head_header_state; - } + }; + block_data.apply_dpos(read_block_state_section); controller_index_set::walk_indices([this, &snapshot, &header]( auto utils ){ using value_t = typename decltype(utils)::index_t::value_type; @@ -1013,7 +1568,7 @@ struct controller_impl { authorization.read_from_snapshot(snapshot); resource_limits.read_from_snapshot(snapshot); - db.set_revision( head->block_num ); + db.set_revision( head_block_num() ); db.create([](const auto& header){ // nothing to do }); @@ -1083,7 +1638,7 @@ struct controller_impl { const auto& tapos_block_summary = db.get(1); db.modify( tapos_block_summary, [&]( auto& bs ) { - bs.block_id = head->id; + bs.block_id = block_data.head_block_id(); }); genesis.initial_configuration.validate(); @@ -1139,26 +1694,7 @@ struct controller_impl { } auto& bb = std::get(pending->_block_stage); - auto orig_trx_receipts_size = bb._pending_trx_receipts.size(); - auto orig_trx_metas_size = bb._pending_trx_metas.size(); 
- auto orig_trx_receipt_digests_size = std::holds_alternative(bb._trx_mroot_or_receipt_digests) ? - std::get(bb._trx_mroot_or_receipt_digests).size() : 0; - auto orig_action_receipt_digests_size = bb._action_receipt_digests.size(); - std::function callback = [this, - orig_trx_receipts_size, - orig_trx_metas_size, - orig_trx_receipt_digests_size, - orig_action_receipt_digests_size]() - { - auto& bb = std::get(pending->_block_stage); - bb._pending_trx_receipts.resize(orig_trx_receipts_size); - bb._pending_trx_metas.resize(orig_trx_metas_size); - if( std::holds_alternative(bb._trx_mroot_or_receipt_digests) ) - std::get(bb._trx_mroot_or_receipt_digests).resize(orig_trx_receipt_digests_size); - bb._action_receipt_digests.resize(orig_action_receipt_digests_size); - }; - - return fc::make_scoped_exit( std::move(callback) ); + return fc::make_scoped_exit(bb.make_block_restore_point()); } transaction_trace_ptr apply_onerror( const generated_transaction& gtrx, @@ -1216,8 +1752,8 @@ struct controller_impl { auto restore = make_block_restore_point(); trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::soft_fail, trx_context.billed_cpu_time_us, trace->net_usage ); - fc::move_append( std::get(pending->_block_stage)._action_receipt_digests, - std::move(trx_context.executed_action_receipt_digests) ); + auto& bb = std::get(pending->_block_stage); + fc::move_append( bb.action_receipt_digests(), std::move(trx_context.executed_action_receipt_digests) ); trx_context.squash(); restore.cancel(); @@ -1402,7 +1938,7 @@ struct controller_impl { trx_context.billed_cpu_time_us, trace->net_usage ); - fc::move_append( std::get(pending->_block_stage)._action_receipt_digests, + fc::move_append( std::get(pending->_block_stage).action_receipt_digests(), std::move(trx_context.executed_action_receipt_digests) ); trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); @@ -1522,15 +2058,16 @@ struct controller_impl { uint64_t cpu_usage_us, uint64_t net_usage ) { uint64_t net_usage_words = net_usage / 8; EOS_ASSERT( net_usage_words*8 == net_usage, transaction_exception, "net_usage is not divisible by 8" ); - auto& receipts = std::get(pending->_block_stage)._pending_trx_receipts; + auto& bb = std::get(pending->_block_stage); + auto& receipts = bb.pending_trx_receipts(); receipts.emplace_back( trx ); transaction_receipt& r = receipts.back(); r.cpu_usage_us = cpu_usage_us; r.net_usage_words = net_usage_words; r.status = status; - auto& bb = std::get(pending->_block_stage); - if( std::holds_alternative(bb._trx_mroot_or_receipt_digests) ) - std::get(bb._trx_mroot_or_receipt_digests).emplace_back( r.digest() ); + auto& mroot_or_digests = bb.trx_mroot_or_receipt_digests(); + if( std::holds_alternative(mroot_or_digests) ) + std::get(mroot_or_digests).emplace_back( r.digest() ); return r; } @@ -1612,13 +2149,14 @@ struct controller_impl { auto restore = make_block_restore_point( trx->is_read_only() ); + auto& bb = std::get(pending->_block_stage); trx->billed_cpu_time_us = trx_context.billed_cpu_time_us; if (!trx->implicit() && !trx->is_read_only()) { transaction_receipt::status_enum s = (trx_context.delay == fc::seconds(0)) ? 
transaction_receipt::executed : transaction_receipt::delayed; trace->receipt = push_receipt(*trx->packed_trx(), s, trx_context.billed_cpu_time_us, trace->net_usage); - std::get(pending->_block_stage)._pending_trx_metas.emplace_back(trx); + bb.pending_trx_metas().emplace_back(trx); } else { transaction_receipt_header r; r.status = transaction_receipt::executed; @@ -1628,7 +2166,7 @@ struct controller_impl { } if ( !trx->is_read_only() ) { - fc::move_append( std::get(pending->_block_stage)._action_receipt_digests, + fc::move_append( bb.action_receipt_digests(), std::move(trx_context.executed_action_receipt_digests) ); if ( !trx->is_dry_run() ) { // call the accept signal but only once for this transaction @@ -1706,33 +2244,39 @@ struct controller_impl { uint32_t hs_lib = hs_irreversible_block_num.load(); const bool hs_active = hs_lib > 0; // the transition from 0 to >0 cannot happen during start_block - emit( self.block_start, head->block_num + 1 ); + emit( self.block_start, head_block_num() + 1 ); // at block level, no transaction specific logging is possible if (auto dm_logger = get_deep_mind_logger(false)) { // The head block represents the block just before this one that is about to start, so add 1 to get this block num - dm_logger->on_start_block(head->block_num + 1); + dm_logger->on_start_block(head_block_num() + 1); } - auto guard_pending = fc::make_scoped_exit([this, head_block_num=head->block_num](){ + auto guard_pending = fc::make_scoped_exit([this, head_block_num=head_block_num()](){ protocol_features.popped_blocks_to( head_block_num ); pending.reset(); }); - if (!self.skip_db_sessions(s)) { - EOS_ASSERT( db.revision() == head->block_num, database_exception, "db revision is not on par with head block", - ("db.revision()", db.revision())("controller_head_block", head->block_num)("fork_db_head_block", fork_db.head()->block_num) ); + //building_block_input bbi{ head->id(), when, head->get_scheduled_producer(when), std::move(new_protocol_feature_activations) }; + // [greg todo] build IF `building_block` below if not in dpos mode. + // we'll need a different `building_block` constructor for IF mode + auto update_pending = [&](auto& fork_db, auto& head) { + if (!self.skip_db_sessions(s)) { + EOS_ASSERT( db.revision() == head_block_num(), database_exception, "db revision is not on par with head block", + ("db.revision()", db.revision())("controller_head_block", head_block_num())("fork_db_head_block", fork_db_head_block_num()) ); - pending.emplace( maybe_session(db), *head, when, hs_active, confirm_block_count, new_protocol_feature_activations ); - } else { - pending.emplace( maybe_session(), *head, when, hs_active, confirm_block_count, new_protocol_feature_activations ); - } + pending.emplace( maybe_session(db), *head, when, confirm_block_count, new_protocol_feature_activations ); + } else { + pending.emplace( maybe_session(), *head, when, confirm_block_count, new_protocol_feature_activations ); + } + }; + + block_data.apply_dpos(update_pending); pending->_block_status = s; pending->_producer_block_id = producer_block_id; auto& bb = std::get(pending->_block_stage); - const auto& pbhs = bb._pending_block_header_state_legacy; // block status is either ephemeral or incomplete. 
Modify state of speculative block only if we are building a // speculative incomplete block (otherwise we need clean state for head mode, ephemeral block) @@ -1778,9 +2322,9 @@ struct controller_impl { trigger_activation_handler( *f.builtin_feature ); } - protocol_features.activate_feature( feature_digest, pbhs.block_num ); + protocol_features.activate_feature( feature_digest, bb.block_num() ); - ++bb._num_new_protocol_features_that_have_activated; + ++bb.num_new_protocol_features_activated(); } if( num_preactivated_features_that_have_activated == num_preactivated_protocol_features ) { @@ -1797,34 +2341,40 @@ struct controller_impl { ps.preactivated_protocol_features.clear(); for (const auto& digest : new_protocol_feature_activations) - ps.activated_protocol_features.emplace_back(digest, pbhs.block_num); + ps.activated_protocol_features.emplace_back(digest, bb.block_num()); }); } const auto& gpo = self.get_global_properties(); - if( gpo.proposed_schedule_block_num && // if there is a proposed schedule that was proposed in a block ... - ( hs_active || *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible or hotstuff activated... - pbhs.prev_pending_schedule.schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion - ) - { - // Promote proposed schedule to pending schedule; happens in next block after hotstuff activated - EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, - producer_schedule_exception, "wrong producer schedule version specified" ); - - std::get(pending->_block_stage)._new_pending_producer_schedule = producer_authority_schedule::from_shared(gpo.proposed_schedule); - - if( !replaying ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", - ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) - ("lib", hs_active ? hs_lib : pbhs.dpos_irreversible_blocknum) - ("schedule", std::get(pending->_block_stage)._new_pending_producer_schedule ) ); - } + if (!hs_active) { + bb.apply_dpos([&](building_block::building_block_dpos& bb_dpos) { + pending_block_header_state_legacy& pbhs = bb_dpos.pending_block_header_state; + + if( gpo.proposed_schedule_block_num && // if there is a proposed schedule that was proposed in a block ... + ( hs_active || *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible or hotstuff activated... + pbhs.prev_pending_schedule.schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion + ) + { + // Promote proposed schedule to pending schedule; happens in next block after hotstuff activated + EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, + producer_schedule_exception, "wrong producer schedule version specified" ); + + bb_dpos.new_pending_producer_schedule = producer_authority_schedule::from_shared(gpo.proposed_schedule); + + if( !replaying ) { + ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) + ("lib", hs_active ? 
hs_lib : pbhs.dpos_irreversible_blocknum) + ("schedule", bb_dpos.new_pending_producer_schedule ) ); + } - db.modify( gpo, [&]( auto& gp ) { - gp.proposed_schedule_block_num = std::optional(); - gp.proposed_schedule.version=0; - gp.proposed_schedule.producers.clear(); + db.modify( gpo, [&]( auto& gp ) { + gp.proposed_schedule_block_num = std::optional(); + gp.proposed_schedule.version=0; + gp.proposed_schedule.producers.clear(); + }); + } }); } @@ -1839,7 +2389,7 @@ struct controller_impl { auto trace = push_transaction( onbtrx, fc::time_point::maximum(), fc::microseconds::maximum(), gpo.configuration.min_transaction_cpu_usage, true, 0 ); if( trace->except ) { - wlog("onblock ${block_num} is REJECTING: ${entire_trace}",("block_num", head->block_num + 1)("entire_trace", trace)); + wlog("onblock ${block_num} is REJECTING: ${entire_trace}",("block_num", head_block_num() + 1)("entire_trace", trace)); } } catch( const std::bad_alloc& e ) { elog( "on block transaction failed due to a std::bad_alloc" ); @@ -1871,22 +2421,27 @@ struct controller_impl { try { - const bool if_active = hs_irreversible_block_num.load() > 0; - - auto& pbhs = pending->get_pending_block_header_state_legacy(); - auto& bb = std::get(pending->_block_stage); + const bool if_active = !bb.is_dpos(); auto action_merkle_fut = post_async_task( thread_pool.get_executor(), - [ids{std::move( bb._action_receipt_digests )}, if_active]() mutable { - return calc_merkle(std::move(ids), if_active); - } ); - const bool calc_trx_merkle = !std::holds_alternative(bb._trx_mroot_or_receipt_digests); + [ids{std::move( bb.action_receipt_digests() )}, if_active]() mutable { + if (if_active) { + return calculate_merkle( std::move( ids ) ); + } else { + return canonical_merkle( std::move( ids ) ); + } + }); + const bool calc_trx_merkle = !std::holds_alternative(bb.trx_mroot_or_receipt_digests()); std::future trx_merkle_fut; if( calc_trx_merkle ) { trx_merkle_fut = post_async_task( thread_pool.get_executor(), - [ids{std::move( std::get(bb._trx_mroot_or_receipt_digests) )}, if_active]() mutable { - return calc_merkle(std::move(ids), if_active); + [ids{std::move( std::get(bb.trx_mroot_or_receipt_digests()) )}, if_active]() mutable { + if (if_active) { + return calculate_merkle( std::move( ids ) ); + } else { + return canonical_merkle( std::move( ids ) ); + } } ); } @@ -1898,62 +2453,71 @@ struct controller_impl { { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, config::maximum_elastic_resource_multiplier, {99, 100}, {1000, 999}}, {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, config::maximum_elastic_resource_multiplier, {99, 100}, {1000, 999}} ); - resource_limits.process_block_usage(pbhs.block_num); - - // Create (unsigned) block: - auto block_ptr = std::make_shared( pbhs.make_block_header( - calc_trx_merkle ? 
trx_merkle_fut.get() : std::get(bb._trx_mroot_or_receipt_digests), - action_merkle_fut.get(), - bb._new_pending_producer_schedule, - std::move( bb._new_protocol_feature_activations ), - protocol_features.get_protocol_feature_set() - ) ); - - block_ptr->transactions = std::move( bb._pending_trx_receipts ); - - if (bb._pending_block_header_state_legacy.proposed_finalizer_policy) { - // proposed_finalizer_policy can't be set until builtin_protocol_feature_t::instant_finality activated - finalizer_policy& fin_pol = *bb._pending_block_header_state_legacy.proposed_finalizer_policy; - ++bb._pending_block_header_state_legacy.last_proposed_finalizer_policy_generation; - fin_pol.generation = bb._pending_block_header_state_legacy.last_proposed_finalizer_policy_generation; + resource_limits.process_block_usage(bb.block_num()); + +#if 0 + // [greg todo] see https://github.com/AntelopeIO/leap/issues/1911 + bb.apply_hs([&](building_block::building_block_if& bb) { + auto proposed_fin_pol = bb.new_finalizer_policy; + if (proposed_fin_pol) { + // proposed_finalizer_policy can't be set until builtin_protocol_feature_t::instant_finality activated + finalizer_policy fin_pol = std::move(*proposed_fin_pol); + fin_pol.generation = bb.apply_hs([&](building_block_if& h) { + return h._bhs.increment_finalizer_policy_generation(); }); #warning set last_qc_block_num, is_last_qc_strong, and new_proposer_policy correctly - uint32_t last_qc_block_num {0}; - bool is_last_qc_strong {false}; - std::optional new_proposer_policy {std::nullopt}; - emplace_extension( - block_ptr->header_extensions, - instant_finality_extension::extension_id(), - fc::raw::pack( instant_finality_extension{ last_qc_block_num, is_last_qc_strong, std::move(fin_pol), std::move(new_proposer_policy) } ) - ); + uint32_t last_qc_block_num {0}; + bool is_last_qc_strong {false}; + std::optional new_proposer_policy {std::nullopt}; + emplace_extension( + block_ptr->header_extensions, + instant_finality_extension::extension_id(), + fc::raw::pack( instant_finality_extension{ last_qc_block_num, is_last_qc_strong, std::move(fin_pol), std::move(new_proposer_policy) } ) + ); } - - auto id = block_ptr->calculate_id(); - - // Update TaPoS table: - create_block_summary( id ); - - /* - ilog( "finalized block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", - ("n",pbhs.block_num) - ("id",id) - ("t",pbhs.timestamp) - ("p",pbhs.producer) - ("signing_key", pbhs.block_signing_key) - ("v",pbhs.active_schedule_version) - ("lib",pbhs.dpos_irreversible_blocknum) - ("ndtrxs",db.get_index().size()) - ("np",block_ptr->new_producers) - ); - */ - - pending->_block_stage = assembled_block{ - id, - std::move( bb._pending_block_header_state_legacy ), - std::move( bb._pending_trx_metas ), - std::move( block_ptr ), - std::move( bb._new_pending_producer_schedule ) - }; - } FC_CAPTURE_AND_RETHROW() } /// finalize_block +#endif + + // Create (unsigned) block in dpos mode. [greg todo] do it in IF mode later when we are ready to sign it + bb.apply_dpos([&](building_block::building_block_dpos& bb) { + auto block_ptr = std::make_shared( + bb.pending_block_header_state.make_block_header( + calc_trx_merkle ? 
trx_merkle_fut.get() : std::get(bb.trx_mroot_or_receipt_digests), + action_merkle_fut.get(), + bb.new_pending_producer_schedule, + vector(bb.new_protocol_feature_activations), // have to copy as member declared `const` + protocol_features.get_protocol_feature_set())); + + block_ptr->transactions = std::move(bb.pending_trx_receipts); + + auto id = block_ptr->calculate_id(); + + // Update TaPoS table: + create_block_summary( id ); + + /* + ilog( "finalized block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", + ("n",pbhs.block_num()) + ("id",id) + ("t",pbhs.timestamp) + ("p",pbhs.producer) + ("signing_key", pbhs.block_signing_key) + ("v",pbhs.active_schedule_version) + ("lib",pbhs.dpos_irreversible_blocknum) + ("ndtrxs",db.get_index().size()) + ("np",block_ptr->new_producers) + ); + */ + + pending->_block_stage = assembled_block{assembled_block::assembled_block_dpos{ + id, + std::move( bb.pending_block_header_state ), + std::move( bb.pending_trx_metas ), + std::move( block_ptr ), + std::move( bb.new_pending_producer_schedule ) + }}; + }); + } + FC_CAPTURE_AND_RETHROW() + } /// finalize_block /** * @post regardless of the success of commit block there is no active pending block @@ -1967,24 +2531,33 @@ struct controller_impl { EOS_ASSERT( std::holds_alternative(pending->_block_stage), block_validate_exception, "cannot call commit_block until pending block is completed" ); - const auto& bsp = std::get(pending->_block_stage)._block_state; + const auto& cb = std::get(pending->_block_stage); - if( s == controller::block_status::incomplete ) { - fork_db.add( bsp ); - fork_db.mark_valid( bsp ); - emit( self.accepted_block_header, std::tie(bsp->block, bsp->id) ); - EOS_ASSERT( bsp == fork_db.head(), fork_database_exception, "committed block did not become the new head in fork database"); - } else if (s != controller::block_status::irreversible) { - fork_db.mark_valid( bsp ); - } - head = bsp; + auto add_completed_block = [&](auto& fork_db, auto& head) { + const auto& bsp = std::get>(cb.bsp); - // at block level, no transaction specific logging is possible - if (auto* dm_logger = get_deep_mind_logger(false)) { - dm_logger->on_accepted_block(bsp); - } + if( s == controller::block_status::incomplete ) { + fork_db.add( bsp ); + fork_db.mark_valid( bsp ); + emit( self.accepted_block_header, std::tie(bsp->block, bsp->id()) ); + EOS_ASSERT( bsp == fork_db.head(), fork_database_exception, "committed block did not become the new head in fork database"); + } else if (s != controller::block_status::irreversible) { + fork_db.mark_valid( bsp ); + } + head = bsp; + + emit( self.accepted_block, std::tie(bsp->block, bsp->id()) ); + + if constexpr (std::is_same_v>) {\ + // [greg todo] support deep_mind_logger even when in IF mode + // at block level, no transaction specific logging is possible + if (auto* dm_logger = get_deep_mind_logger(false)) { + dm_logger->on_accepted_block(bsp); + } + } + }; - emit( self.accepted_block, std::tie(bsp->block, bsp->id) ); + block_data.apply(add_completed_block); if( s == controller::block_status::incomplete ) { log_irreversible(); @@ -2004,8 +2577,7 @@ struct controller_impl { void set_proposed_finalizers(const finalizer_policy& fin_pol) { assert(pending); // has to exist and be building_block since called from host function auto& bb = std::get(pending->_block_stage); - - bb._pending_block_header_state_legacy.proposed_finalizer_policy.emplace(fin_pol); + bb.apply_hs([&](building_block::building_block_if& bb) { 
bb.new_finalizer_policy.emplace(fin_pol); }); } /** @@ -2089,113 +2661,122 @@ struct controller_impl { #undef EOS_REPORT } - - void apply_block( controller::block_report& br, const block_state_legacy_ptr& bsp, controller::block_status s, + template + void apply_block( controller::block_report& br, const BSP& bsp, controller::block_status s, const trx_meta_cache_lookup& trx_lookup ) { try { try { - auto start = fc::time_point::now(); - const signed_block_ptr& b = bsp->block; - const auto& new_protocol_feature_activations = bsp->get_new_protocol_feature_activations(); - - auto producer_block_id = bsp->id; - start_block( b->timestamp, b->confirmed, new_protocol_feature_activations, s, producer_block_id, fc::time_point::maximum() ); - - // validated in create_block_state_future() - std::get(pending->_block_stage)._trx_mroot_or_receipt_digests = b->transaction_mroot; - - const bool existing_trxs_metas = !bsp->trxs_metas().empty(); - const bool pub_keys_recovered = bsp->is_pub_keys_recovered(); - const bool skip_auth_checks = self.skip_auth_check(); - std::vector> trx_metas; - bool use_bsp_cached = false; - if( pub_keys_recovered || (skip_auth_checks && existing_trxs_metas) ) { - use_bsp_cached = true; - } else { - trx_metas.reserve( b->transactions.size() ); - for( const auto& receipt : b->transactions ) { - if( std::holds_alternative(receipt.trx)) { - const auto& pt = std::get(receipt.trx); - transaction_metadata_ptr trx_meta_ptr = trx_lookup ? trx_lookup( pt.id() ) : transaction_metadata_ptr{}; - if( trx_meta_ptr && *trx_meta_ptr->packed_trx() != pt ) trx_meta_ptr = nullptr; - if( trx_meta_ptr && ( skip_auth_checks || !trx_meta_ptr->recovered_keys().empty() ) ) { - trx_metas.emplace_back( std::move( trx_meta_ptr ), recover_keys_future{} ); - } else if( skip_auth_checks ) { - packed_transaction_ptr ptrx( b, &pt ); // alias signed_block_ptr - trx_metas.emplace_back( - transaction_metadata::create_no_recover_keys( std::move(ptrx), transaction_metadata::trx_type::input ), - recover_keys_future{} ); - } else { - packed_transaction_ptr ptrx( b, &pt ); // alias signed_block_ptr - auto fut = transaction_metadata::start_recover_keys( - std::move( ptrx ), thread_pool.get_executor(), chain_id, fc::microseconds::maximum(), transaction_metadata::trx_type::input ); - trx_metas.emplace_back( transaction_metadata_ptr{}, std::move( fut ) ); + // [greg todo] remove `if`, `lambda` and `apply_dpos`, and make code work for both versions of BSP + if constexpr (std::is_same_v) { + auto do_the_work = [&](auto& fork_db, auto& head) { + auto start = fc::time_point::now(); + const signed_block_ptr& b = bsp->block; + const auto& new_protocol_feature_activations = bsp->get_new_protocol_feature_activations(); + + auto producer_block_id = bsp->id(); + start_block( b->timestamp, b->confirmed, new_protocol_feature_activations, s, producer_block_id, fc::time_point::maximum() ); + + // validated in create_block_state_future() + std::get(pending->_block_stage).trx_mroot_or_receipt_digests() = b->transaction_mroot; + + const bool existing_trxs_metas = !bsp->trxs_metas().empty(); + const bool pub_keys_recovered = bsp->is_pub_keys_recovered(); + const bool skip_auth_checks = self.skip_auth_check(); + std::vector> trx_metas; + bool use_bsp_cached = false; + if( pub_keys_recovered || (skip_auth_checks && existing_trxs_metas) ) { + use_bsp_cached = true; + } else { + trx_metas.reserve( b->transactions.size() ); + for( const auto& receipt : b->transactions ) { + if( std::holds_alternative(receipt.trx)) { + const auto& pt = 
std::get(receipt.trx); + transaction_metadata_ptr trx_meta_ptr = trx_lookup ? trx_lookup( pt.id() ) : transaction_metadata_ptr{}; + if( trx_meta_ptr && *trx_meta_ptr->packed_trx() != pt ) trx_meta_ptr = nullptr; + if( trx_meta_ptr && ( skip_auth_checks || !trx_meta_ptr->recovered_keys().empty() ) ) { + trx_metas.emplace_back( std::move( trx_meta_ptr ), recover_keys_future{} ); + } else if( skip_auth_checks ) { + packed_transaction_ptr ptrx( b, &pt ); // alias signed_block_ptr + trx_metas.emplace_back( + transaction_metadata::create_no_recover_keys( std::move(ptrx), transaction_metadata::trx_type::input ), + recover_keys_future{} ); + } else { + packed_transaction_ptr ptrx( b, &pt ); // alias signed_block_ptr + auto fut = transaction_metadata::start_recover_keys( + std::move( ptrx ), thread_pool.get_executor(), chain_id, fc::microseconds::maximum(), transaction_metadata::trx_type::input ); + trx_metas.emplace_back( transaction_metadata_ptr{}, std::move( fut ) ); + } + } } } - } - } - transaction_trace_ptr trace; - - size_t packed_idx = 0; - const auto& trx_receipts = std::get(pending->_block_stage)._pending_trx_receipts; - for( const auto& receipt : b->transactions ) { - auto num_pending_receipts = trx_receipts.size(); - if( std::holds_alternative(receipt.trx) ) { - const auto& trx_meta = ( use_bsp_cached ? bsp->trxs_metas().at( packed_idx ) - : ( !!std::get<0>( trx_metas.at( packed_idx ) ) ? - std::get<0>( trx_metas.at( packed_idx ) ) - : std::get<1>( trx_metas.at( packed_idx ) ).get() ) ); - trace = push_transaction( trx_meta, fc::time_point::maximum(), fc::microseconds::maximum(), receipt.cpu_usage_us, true, 0 ); - ++packed_idx; - } else if( std::holds_alternative(receipt.trx) ) { - trace = push_scheduled_transaction( std::get(receipt.trx), fc::time_point::maximum(), fc::microseconds::maximum(), receipt.cpu_usage_us, true ); - } else { - EOS_ASSERT( false, block_validate_exception, "encountered unexpected receipt type" ); - } + transaction_trace_ptr trace; + + size_t packed_idx = 0; + const auto& trx_receipts = std::get(pending->_block_stage).pending_trx_receipts(); + for( const auto& receipt : b->transactions ) { + auto num_pending_receipts = trx_receipts.size(); + if( std::holds_alternative(receipt.trx) ) { + const auto& trx_meta = ( use_bsp_cached ? bsp->trxs_metas().at( packed_idx ) + : ( !!std::get<0>( trx_metas.at( packed_idx ) ) ? 
+ std::get<0>( trx_metas.at( packed_idx ) ) + : std::get<1>( trx_metas.at( packed_idx ) ).get() ) ); + trace = push_transaction( trx_meta, fc::time_point::maximum(), fc::microseconds::maximum(), receipt.cpu_usage_us, true, 0 ); + ++packed_idx; + } else if( std::holds_alternative(receipt.trx) ) { + trace = push_scheduled_transaction( std::get(receipt.trx), fc::time_point::maximum(), fc::microseconds::maximum(), receipt.cpu_usage_us, true ); + } else { + EOS_ASSERT( false, block_validate_exception, "encountered unexpected receipt type" ); + } - bool transaction_failed = trace && trace->except; - bool transaction_can_fail = receipt.status == transaction_receipt_header::hard_fail && std::holds_alternative(receipt.trx); - if( transaction_failed && !transaction_can_fail) { - edump((*trace)); - throw *trace->except; - } + bool transaction_failed = trace && trace->except; + bool transaction_can_fail = receipt.status == transaction_receipt_header::hard_fail && std::holds_alternative(receipt.trx); + if( transaction_failed && !transaction_can_fail) { + edump((*trace)); + throw *trace->except; + } - EOS_ASSERT( trx_receipts.size() > 0, - block_validate_exception, "expected a receipt, block_num ${bn}, block_id ${id}, receipt ${e}", - ("bn", b->block_num())("id", producer_block_id)("e", receipt) - ); - EOS_ASSERT( trx_receipts.size() == num_pending_receipts + 1, - block_validate_exception, "expected receipt was not added, block_num ${bn}, block_id ${id}, receipt ${e}", - ("bn", b->block_num())("id", producer_block_id)("e", receipt) - ); - const transaction_receipt_header& r = trx_receipts.back(); - EOS_ASSERT( r == static_cast(receipt), - block_validate_exception, "receipt does not match, ${lhs} != ${rhs}", - ("lhs", r)("rhs", static_cast(receipt)) ); - } + EOS_ASSERT( trx_receipts.size() > 0, + block_validate_exception, "expected a receipt, block_num ${bn}, block_id ${id}, receipt ${e}", + ("bn", b->block_num())("id", producer_block_id)("e", receipt) + ); + EOS_ASSERT( trx_receipts.size() == num_pending_receipts + 1, + block_validate_exception, "expected receipt was not added, block_num ${bn}, block_id ${id}, receipt ${e}", + ("bn", b->block_num())("id", producer_block_id)("e", receipt) + ); + const transaction_receipt_header& r = trx_receipts.back(); + EOS_ASSERT( r == static_cast(receipt), + block_validate_exception, "receipt does not match, ${lhs} != ${rhs}", + ("lhs", r)("rhs", static_cast(receipt)) ); + } - finalize_block(); + finalize_block(); - auto& ab = std::get(pending->_block_stage); + auto& ab = std::get(pending->_block_stage); - if( producer_block_id != ab._id ) { - elog( "Validation block id does not match producer block id" ); - report_block_header_diff( *b, *ab._unsigned_block ); - // this implicitly asserts that all header fields (less the signature) are identical - EOS_ASSERT( producer_block_id == ab._id, block_validate_exception, "Block ID does not match", - ("producer_block_id", producer_block_id)("validator_block_id", ab._id) ); - } + if( producer_block_id != ab.id() ) { + elog( "Validation block id does not match producer block id" ); - if( !use_bsp_cached ) { - bsp->set_trxs_metas( std::move( ab._trx_metas ), !skip_auth_checks ); - } - // create completed_block with the existing block_state as we just verified it is the same as assembled_block - pending->_block_stage = completed_block{ bsp }; + // [greg todo] also call `report_block_header_diff in IF mode once we have a signed_block + ab.apply_dpos([&](assembled_block::assembled_block_dpos& ab) { report_block_header_diff( *b, 
*ab.unsigned_block ); }); + + // this implicitly asserts that all header fields (less the signature) are identical + EOS_ASSERT( producer_block_id == ab.id(), block_validate_exception, "Block ID does not match", + ("producer_block_id", producer_block_id)("validator_block_id", ab.id()) ); + } - br = pending->_block_report; // copy before commit block destroys pending - commit_block(s); - br.total_time = fc::time_point::now() - start; + if( !use_bsp_cached ) { + bsp->set_trxs_metas( ab.extract_trx_metas(), !skip_auth_checks ); + } + // create completed_block with the existing block_state as we just verified it is the same as assembled_block + pending->_block_stage = completed_block{ bsp }; + + br = pending->_block_report; // copy before commit block destroys pending + commit_block(s); + br.total_time = fc::time_point::now() - start; + }; + block_data.apply_dpos(do_the_work); + } return; } catch ( const std::bad_alloc& ) { throw; @@ -2242,40 +2823,48 @@ struct controller_impl { skip_validate_signee ); - EOS_ASSERT( id == bsp->id, block_validate_exception, - "provided id ${id} does not match block id ${bid}", ("id", id)("bid", bsp->id) ); + EOS_ASSERT( id == bsp->id(), block_validate_exception, + "provided id ${id} does not match block id ${bid}", ("id", id)("bid", bsp->id()) ); return bsp; } std::future create_block_state_future( const block_id_type& id, const signed_block_ptr& b ) { EOS_ASSERT( b, block_validate_exception, "null block" ); - return post_async_task( thread_pool.get_executor(), [b, id, control=this]() { - // no reason for a block_state if fork_db already knows about block - auto existing = control->fork_db.get_block( id ); - EOS_ASSERT( !existing, fork_database_exception, "we already know about this block: ${id}", ("id", id) ); + auto f = [&](auto& fork_db, auto& head) -> std::future { + return post_async_task( thread_pool.get_executor(), [b, id, &fork_db, control=this]() { + // no reason for a block_state if fork_db already knows about block + auto existing = fork_db.get_block( id ); + EOS_ASSERT( !existing, fork_database_exception, "we already know about this block: ${id}", ("id", id) ); - auto prev = control->fork_db.get_block_header( b->previous ); - EOS_ASSERT( prev, unlinkable_block_exception, - "unlinkable block ${id}", ("id", id)("previous", b->previous) ); + auto prev = fork_db.get_block_header( b->previous ); + EOS_ASSERT( prev, unlinkable_block_exception, + "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return control->create_block_state_i( id, b, *prev ); - } ); + return control->create_block_state_i( id, b, *prev ); // [greg todo] make it work with apply() (if `create_block_state_future` needed) + } ); + }; + + return block_data.apply_dpos>(f); // [greg todo] make it work with apply() } // thread safe, expected to be called from thread other than the main thread block_state_legacy_ptr create_block_state( const block_id_type& id, const signed_block_ptr& b ) { EOS_ASSERT( b, block_validate_exception, "null block" ); + + auto f = [&](auto& fork_db, auto& head) -> block_state_legacy_ptr { + // no reason for a block_state if fork_db already knows about block + auto existing = fork_db.get_block( id ); + EOS_ASSERT( !existing, fork_database_exception, "we already know about this block: ${id}", ("id", id) ); - // no reason for a block_state if fork_db already knows about block - auto existing = fork_db.get_block( id ); - EOS_ASSERT( !existing, fork_database_exception, "we already know about this block: ${id}", ("id", id) ); + // previous not found could 
mean that previous block not applied yet + auto prev = fork_db.get_block_header( b->previous ); + if( !prev ) return {}; - // previous not found could mean that previous block not applied yet - auto prev = fork_db.get_block_header( b->previous ); - if( !prev ) return {}; + return create_block_state_i( id, b, *prev ); // [greg todo] make it work with apply() - if `create_block_state` needed + }; - return create_block_state_i( id, b, *prev ); + return block_data.apply_dpos(f); } void push_block( controller::block_report& br, @@ -2298,20 +2887,25 @@ struct controller_impl { shutdown(); return; } + + auto do_push = [&](auto& fork_db, auto& head) { + fork_db.add( bsp ); - fork_db.add( bsp ); + if (self.is_trusted_producer(b->producer)) { + trusted_producer_light_validation = true; + }; - if (self.is_trusted_producer(b->producer)) { - trusted_producer_light_validation = true; - }; + emit( self.accepted_block_header, std::tie(bsp->block, bsp->id()) ); - emit( self.accepted_block_header, std::tie(bsp->block, bsp->id) ); + if( read_mode != db_read_mode::IRREVERSIBLE ) { + maybe_switch_forks( br, fork_db.pending_head(), s, forked_branch_cb, trx_lookup ); + } else { + log_irreversible(); + } + }; - if( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( br, fork_db.pending_head(), s, forked_branch_cb, trx_lookup ); - } else { - log_irreversible(); - } + block_data.apply_dpos(do_push); // [greg todo] make it work with apply() - `push_block` taking block_state_legacy_ptr + // and forked_branch_callback } FC_LOG_AND_RETHROW( ) } @@ -2334,137 +2928,145 @@ struct controller_impl { const bool skip_validate_signee = !conf.force_all_checks; - auto bsp = std::make_shared( - *head, - b, - protocol_features.get_protocol_feature_set(), - b->confirmed == hs_block_confirmed, // is hotstuff enabled for block - [this]( block_timestamp_type timestamp, - const flat_set& cur_features, - const vector& new_features ) - { check_protocol_features( timestamp, cur_features, new_features ); }, - skip_validate_signee - ); + auto do_push = [&](auto& fork_db, auto& head) { + auto bsp = std::make_shared( + *head, + b, + protocol_features.get_protocol_feature_set(), + b->confirmed == hs_block_confirmed, // is hotstuff enabled for block + [this]( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + { check_protocol_features( timestamp, cur_features, new_features ); }, + skip_validate_signee + ); - if( s != controller::block_status::irreversible ) { - fork_db.add( bsp, true ); - } + if( s != controller::block_status::irreversible ) { + fork_db.add( bsp, true ); + } - emit( self.accepted_block_header, std::tie(bsp->block, bsp->id) ); + emit( self.accepted_block_header, std::tie(bsp->block, bsp->id()) ); - controller::block_report br; - if( s == controller::block_status::irreversible ) { - apply_block( br, bsp, s, trx_meta_cache_lookup{} ); + controller::block_report br; + if( s == controller::block_status::irreversible ) { + apply_block( br, bsp, s, trx_meta_cache_lookup{} ); - // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. - // So emit it explicitly here. - emit( self.irreversible_block, std::tie(bsp->block, bsp->id) ); + // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. + // So emit it explicitly here. 
+ emit( self.irreversible_block, std::tie(bsp->block, bsp->id()) ); - if (!self.skip_db_sessions(s)) { - db.commit(bsp->block_num); - } + if (!self.skip_db_sessions(s)) { + db.commit(bsp->block_num()); + } - } else { - EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception, - "invariant failure: cannot replay reversible blocks while in irreversible mode" ); - maybe_switch_forks( br, bsp, s, forked_branch_callback{}, trx_meta_cache_lookup{} ); - } + } else { + EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception, + "invariant failure: cannot replay reversible blocks while in irreversible mode" ); + maybe_switch_forks( br, bsp, s, forked_branch_callback{}, trx_meta_cache_lookup{} ); + } + }; + + block_data.apply_dpos(do_push); // [greg todo] make it work with apply() - need block_state constructor } FC_LOG_AND_RETHROW( ) } - void maybe_switch_forks( controller::block_report& br, const block_state_legacy_ptr& new_head, controller::block_status s, + template + void maybe_switch_forks( controller::block_report& br, const BSP& new_head, controller::block_status s, const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) { - bool head_changed = true; - if( new_head->header.previous == head->id ) { - try { - apply_block( br, new_head, s, trx_lookup ); - } catch ( const std::exception& e ) { - fork_db.remove( new_head->id ); - throw; - } - } else if( new_head->id != head->id ) { - auto old_head = head; - ilog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})", - ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) ); - - // not possible to log transaction specific infor when switching forks - if (auto dm_logger = get_deep_mind_logger(false)) { - dm_logger->on_switch_forks(head->id, new_head->id); - } - - auto branches = fork_db.fetch_branch_from( new_head->id, head->id ); - - if( branches.second.size() > 0 ) { - for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { - pop_block(); + auto do_maybe_switch_forks = [&](auto& fork_db, auto& head) { + bool head_changed = true; + if( new_head->header.previous == head->id() ) { + try { + apply_block( br, new_head, s, trx_lookup ); + } catch ( const std::exception& e ) { + fork_db.remove( new_head->id() ); + throw; } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + } else if( new_head->id() != head->id() ) { + ilog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})", + ("current_head_id", head->id())("current_head_num", head_block_num())("new_head_id", new_head->id())("new_head_num", new_head->block_num()) ); - if( forked_branch_cb ) forked_branch_cb( branches.second ); - } - - for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { - auto except = std::exception_ptr{}; - try { - br = controller::block_report{}; - apply_block( br, *ritr, (*ritr)->is_valid() ? 
controller::block_status::validated - : controller::block_status::complete, trx_lookup ); - } catch ( const std::bad_alloc& ) { - throw; - } catch ( const boost::interprocess::bad_alloc& ) { - throw; - } catch (const fc::exception& e) { - elog("exception thrown while switching forks ${e}", ("e", e.to_detail_string())); - except = std::current_exception(); - } catch (const std::exception& e) { - elog("exception thrown while switching forks ${e}", ("e", e.what())); - except = std::current_exception(); + // not possible to log transaction specific infor when switching forks + if (auto dm_logger = get_deep_mind_logger(false)) { + dm_logger->on_switch_forks(head->id(), new_head->id()); } - if( except ) { - // ritr currently points to the block that threw - // Remove the block that threw and all forks built off it. - fork_db.remove( (*ritr)->id ); + auto branches = fork_db.fetch_branch_from( new_head->id(), head->id() ); - // pop all blocks from the bad fork, discarding their transactions - // ritr base is a forward itr to the last block successfully applied - auto applied_itr = ritr.base(); - for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { + if( branches.second.size() > 0 ) { + for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { pop_block(); } EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch reversal" ); // _should_ never fail + "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + + if( forked_branch_cb ) forked_branch_cb( branches.second ); + } - // re-apply good blocks - for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { + for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { + auto except = std::exception_ptr{}; + try { br = controller::block_report{}; - apply_block( br, *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); - } - std::rethrow_exception(except); - } // end if exception - } /// end for each block in branch - - if (fc::logger::get(DEFAULT_LOGGER).is_enabled(fc::log_level::info)) { - auto get_ids = [&](auto& container)->std::string { - std::string ids; - for(auto ritr = container.rbegin(), e = container.rend(); ritr != e; ++ritr) { - ids += std::to_string((*ritr)->block_num) + ":" + (*ritr)->id.str() + ","; + apply_block( br, *ritr, (*ritr)->is_valid() ? controller::block_status::validated + : controller::block_status::complete, trx_lookup ); + } catch ( const std::bad_alloc& ) { + throw; + } catch ( const boost::interprocess::bad_alloc& ) { + throw; + } catch (const fc::exception& e) { + elog("exception thrown while switching forks ${e}", ("e", e.to_detail_string())); + except = std::current_exception(); + } catch (const std::exception& e) { + elog("exception thrown while switching forks ${e}", ("e", e.what())); + except = std::current_exception(); } - if (!ids.empty()) ids.resize(ids.size()-1); - return ids; - }; - ilog("successfully switched fork to new head ${new_head_id}, removed {${rm_ids}}, applied {${new_ids}}", - ("new_head_id", new_head->id)("rm_ids", get_ids(branches.second))("new_ids", get_ids(branches.first))); + + if( except ) { + // ritr currently points to the block that threw + // Remove the block that threw and all forks built off it. 
+ fork_db.remove( (*ritr)->id() ); + + // pop all blocks from the bad fork, discarding their transactions + // ritr base is a forward itr to the last block successfully applied + auto applied_itr = ritr.base(); + for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { + pop_block(); + } + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + "loss of sync between fork_db and chainbase during fork switch reversal" ); // _should_ never fail + + // re-apply good blocks + for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { + br = controller::block_report{}; + apply_block( br, *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); + } + std::rethrow_exception(except); + } // end if exception + } /// end for each block in branch + + if (fc::logger::get(DEFAULT_LOGGER).is_enabled(fc::log_level::info)) { + auto get_ids = [&](auto& container)->std::string { + std::string ids; + for(auto ritr = container.rbegin(), e = container.rend(); ritr != e; ++ritr) { + ids += std::to_string((*ritr)->block_num()) + ":" + (*ritr)->id().str() + ","; + } + if (!ids.empty()) ids.resize(ids.size()-1); + return ids; + }; + ilog("successfully switched fork to new head ${new_head_id}, removed {${rm_ids}}, applied {${new_ids}}", + ("new_head_id", new_head->id())("rm_ids", get_ids(branches.second))("new_ids", get_ids(branches.first))); + } + } else { + head_changed = false; } - } else { - head_changed = false; - } - if( head_changed ) - log_irreversible(); + if( head_changed ) + log_irreversible(); + }; + + block_data.apply_dpos(do_maybe_switch_forks); // [greg todo] } /// push_block @@ -2473,7 +3075,7 @@ struct controller_impl { if( pending ) { applied_trxs = pending->extract_trx_metas(); pending.reset(); - protocol_features.popped_blocks_to( head->block_num ); + protocol_features.popped_blocks_to( head_block_num() ); } return applied_trxs; } @@ -2496,39 +3098,44 @@ struct controller_impl { } void update_producers_authority() { - const auto& producers = pending->get_pending_block_header_state_legacy().active_schedule.producers; - - auto update_permission = [&]( auto& permission, auto threshold ) { - auto auth = authority( threshold, {}, {}); - for( auto& p : producers ) { - auth.accounts.push_back({{p.producer_name, config::active_name}, 1}); - } + // this is not called when hotstuff is activated + auto& bb = std::get(pending->_block_stage); + bb.apply_dpos([this](building_block::building_block_dpos& dpos_header) { + pending_block_header_state_legacy& pbhs = dpos_header.pending_block_header_state; + const auto& producers = pbhs.active_schedule.producers; + + auto update_permission = [&](auto& permission, auto threshold) { + auto auth = authority(threshold, {}, {}); + for (auto& p : producers) { + auth.accounts.push_back({ + {p.producer_name, config::active_name}, + 1 + }); + } - if( permission.auth != auth ) { - db.modify(permission, [&]( auto& po ) { - po.auth = auth; - }); - } - }; + if (permission.auth != auth) { + db.modify(permission, [&](auto& po) { po.auth = auth; }); + } + }; - uint32_t num_producers = producers.size(); - auto calculate_threshold = [=]( uint32_t numerator, uint32_t denominator ) { - return ( (num_producers * numerator) / denominator ) + 1; - }; + uint32_t num_producers = producers.size(); + auto calculate_threshold = [=](uint32_t numerator, uint32_t denominator) { + return ((num_producers * numerator) / denominator) + 1; + }; - update_permission( 
authorization.get_permission({config::producers_account_name, - config::active_name}), - calculate_threshold( 2, 3 ) /* more than two-thirds */ ); + update_permission(authorization.get_permission({config::producers_account_name, config::active_name}), + calculate_threshold(2, 3) /* more than two-thirds */); - update_permission( authorization.get_permission({config::producers_account_name, - config::majority_producers_permission_name}), - calculate_threshold( 1, 2 ) /* more than one-half */ ); + update_permission( + authorization.get_permission({config::producers_account_name, config::majority_producers_permission_name}), + calculate_threshold(1, 2) /* more than one-half */); - update_permission( authorization.get_permission({config::producers_account_name, - config::minority_producers_permission_name}), - calculate_threshold( 1, 3 ) /* more than one-third */ ); + update_permission( + authorization.get_permission({config::producers_account_name, config::minority_producers_permission_name}), + calculate_threshold(1, 3) /* more than one-third */); - //TODO: Add tests + // TODO: Add tests + }); } void create_block_summary(const block_id_type& id) { @@ -2544,7 +3151,7 @@ struct controller_impl { //Look for expired transactions in the deduplication list, and remove them. auto& transaction_idx = db.get_mutable_index(); const auto& dedupe_index = transaction_idx.indices().get(); - auto now = self.is_building_block() ? self.pending_block_time() : self.head_block_time(); + auto now = self.is_building_block() ? self.pending_block_time() : (time_point)self.head_block_time(); const auto total = dedupe_index.size(); uint32_t num_removed = 0; while( (!dedupe_index.empty()) && ( now > dedupe_index.begin()->expiration.to_time_point() ) ) { @@ -2735,7 +3342,7 @@ struct controller_impl { } uint32_t earliest_available_block_num() const { - return (blog.first_block_num() != 0) ? blog.first_block_num() : fork_db.root()->block_num; + return (blog.first_block_num() != 0) ? 
blog.first_block_num() : fork_db_root_block_num(); } void set_to_write_window() { @@ -2772,7 +3379,17 @@ struct controller_impl { wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); } - block_state_legacy_ptr fork_db_head() const; + bool irreversible_mode() const { return read_mode == db_read_mode::IRREVERSIBLE; } + const block_id_type& fork_db_head_block_id() const { return block_data.fork_db_head_block_id(irreversible_mode()); } + uint32_t fork_db_head_block_num() const { return block_data.fork_db_head_block_num(irreversible_mode()); } + uint32_t fork_db_head_irreversible_blocknum() const { return block_data.fork_db_head_irreversible_blocknum(irreversible_mode()); } + bool fork_db_has_root() const { return block_data.fork_db_has_root(); } + uint32_t fork_db_root_block_num() const { return block_data.fork_db_root_block_num(); } + const block_id_type& fork_db_root_block_id() const { return block_data.fork_db_root_block_id(); } + block_timestamp_type fork_db_root_timestamp() const { return block_data.fork_db_root_timestamp(); } + + uint32_t head_block_num() const { return block_data.head_block_num(); } + const signed_block_ptr& head_block() const { return block_data.head_block(); } }; /// controller_impl thread_local platform_timer controller_impl::timer; @@ -2852,8 +3469,6 @@ const chainbase::database& controller::db()const { return my->db; } chainbase::database& controller::mutable_db()const { return my->db; } -const fork_database& controller::fork_db()const { return my->fork_db; } - void controller::preactivate_feature( const digest_type& feature_digest, bool is_trx_transient ) { const auto& pfs = my->protocol_features.get_protocol_feature_set(); auto cur_time = pending_block_time(); @@ -2977,8 +3592,8 @@ vector controller::get_preactivated_protocol_features()const { } void controller::validate_protocol_features( const vector& features_to_activate )const { - my->check_protocol_features( my->head->header.timestamp, - my->head->activated_protocol_features->protocol_features, + my->check_protocol_features( my->block_data.head_block_time(), + my->block_data.head_activated_protocol_features()->protocol_features, features_to_activate ); } @@ -3000,30 +3615,18 @@ void controller::start_block( block_timestamp_type when, bs, std::optional(), deadline ); } -block_state_legacy_ptr controller::finalize_block( block_report& br, const signer_callback_type& signer_callback ) { +void controller::finalize_block( block_report& br, const signer_callback_type& signer_callback ) { validate_db_available_size(); my->finalize_block(); auto& ab = std::get(my->pending->_block_stage); - - auto bsp = std::make_shared( - std::move( ab._pending_block_header_state_legacy ), - std::move( ab._unsigned_block ), - std::move( ab._trx_metas ), - my->protocol_features.get_protocol_feature_set(), - []( block_timestamp_type timestamp, - const flat_set& cur_features, - const vector& new_features ) - {}, - signer_callback - ); - - my->pending->_block_stage = completed_block{ bsp }; + my->pending->_block_stage = ab.make_completed_block( + my->protocol_features.get_protocol_feature_set(), + [](block_timestamp_type timestamp, const flat_set& cur_features, const vector& new_features) {}, + signer_callback); br = my->pending->_block_report; - - return bsp; } void controller::commit_block() { @@ -3122,50 +3725,48 @@ void controller::set_disable_replay_opts( bool v ) { } uint32_t controller::head_block_num()const { - return my->head->block_num; + return my->head_block_num(); +} +block_timestamp_type 
controller::head_block_timestamp()const { + return my->block_data.head_block_time(); } time_point controller::head_block_time()const { - return my->head->header.timestamp; + return my->block_data.head_block_time(); } block_id_type controller::head_block_id()const { - return my->head->id; + return my->block_data.head_block_id(); } + account_name controller::head_block_producer()const { - return my->head->header.producer; + return my->block_data.head_block_producer(); } + const block_header& controller::head_block_header()const { - return my->head->header; + return my->block_data.head_block_header(); } -block_state_legacy_ptr controller::head_block_state()const { - return my->head; + +block_state_legacy_ptr controller::head_block_state_legacy()const { + // returns null after instant finality activated + auto dpos_head = [](auto& fork_db, auto& head) -> block_state_legacy_ptr { return head; }; + return my->block_data.apply_dpos(dpos_head); } -block_state_legacy_ptr controller_impl::fork_db_head() const { - if( read_mode == db_read_mode::IRREVERSIBLE ) { - // When in IRREVERSIBLE mode fork_db blocks are marked valid when they become irreversible so that - // fork_db.head() returns irreversible block - // Use pending_head since this method should return the chain head and not last irreversible. - return fork_db.pending_head(); - } else { - return fork_db.head(); - } +const signed_block_ptr& controller::head_block()const { + return my->head_block(); } uint32_t controller::fork_db_head_block_num()const { - return my->fork_db_head()->block_num; + return my->block_data.fork_db_head_block_num(my->read_mode == db_read_mode::IRREVERSIBLE); } -block_id_type controller::fork_db_head_block_id()const { - return my->fork_db_head()->id; +const block_id_type& controller::fork_db_head_block_id()const { + return my->fork_db_head_block_id(); } block_timestamp_type controller::pending_block_timestamp()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - - if( std::holds_alternative(my->pending->_block_stage) ) - return std::get(my->pending->_block_stage)._block_state->header.timestamp; - - return my->pending->get_pending_block_header_state_legacy().timestamp; + + return my->pending->timestamp(); } time_point controller::pending_block_time()const { @@ -3174,29 +3775,17 @@ time_point controller::pending_block_time()const { uint32_t controller::pending_block_num()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - - if( std::holds_alternative(my->pending->_block_stage) ) - return std::get(my->pending->_block_stage)._block_state->header.block_num(); - - return my->pending->get_pending_block_header_state_legacy().block_num; + return my->pending->block_num(); } account_name controller::pending_block_producer()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - - if( std::holds_alternative(my->pending->_block_stage) ) - return std::get(my->pending->_block_stage)._block_state->header.producer; - - return my->pending->get_pending_block_header_state_legacy().producer; + return my->pending->producer(); } -const block_signing_authority& controller::pending_block_signing_authority()const { +block_signing_authority controller::pending_block_signing_authority() const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - - if( std::holds_alternative(my->pending->_block_stage) ) - return std::get(my->pending->_block_stage)._block_state->valid_block_signing_authority; - - return 
my->pending->get_pending_block_header_state_legacy().valid_block_signing_authority; + return my->pending->pending_block_signing_authority(); } std::optional controller::pending_producer_block_id()const { @@ -3211,15 +3800,15 @@ void controller::set_hs_irreversible_block_num(uint32_t block_num) { } uint32_t controller::last_irreversible_block_num() const { - return my->fork_db.root()->block_num; + return my->fork_db_root_block_num(); } block_id_type controller::last_irreversible_block_id() const { - return my->fork_db.root()->id; + return my->fork_db_root_block_id(); } time_point controller::last_irreversible_block_time() const { - return my->fork_db.root()->header.timestamp.to_time_point(); + return my->fork_db_root_timestamp().to_time_point(); } @@ -3231,16 +3820,22 @@ const global_property_object& controller::get_global_properties()const { } signed_block_ptr controller::fetch_block_by_id( const block_id_type& id )const { - auto state = my->fork_db.get_block(id); - if( state && state->block ) return state->block; + auto sb_ptr = my->block_data.fork_db_fetch_block_by_id(id); + if( sb_ptr ) return sb_ptr; auto bptr = my->blog.read_block_by_num( block_header::num_from_id(id) ); if( bptr && bptr->calculate_id() == id ) return bptr; return signed_block_ptr(); } std::optional controller::fetch_block_header_by_id( const block_id_type& id )const { +#if 0 + // [greg todo] is the below code equivalent?? auto state = my->fork_db.get_block(id); if( state && state->block ) return state->header; +#else + auto sb_ptr = my->block_data.fork_db_fetch_block_by_id(id); + if( sb_ptr ) return *static_cast(sb_ptr.get()); +#endif auto result = my->blog.read_block_header_by_num( block_header::num_from_id(id) ); if( result && result->calculate_id() == id ) return result; return {}; @@ -3264,14 +3859,22 @@ std::optional controller::fetch_block_header_by_number( uin return my->blog.read_block_header_by_num(block_num); } FC_CAPTURE_AND_RETHROW( (block_num) ) } + block_state_legacy_ptr controller::fetch_block_state_by_id( block_id_type id )const { - auto state = my->fork_db.get_block(id); - return state; + // returns nullptr when in IF mode + auto get_block_state = [&](auto& fork_db, auto& head) -> block_state_legacy_ptr { return fork_db.get_block(id); }; + return my->block_data.apply_dpos(get_block_state); } -block_state_legacy_ptr controller::fetch_block_state_by_number( uint32_t block_num )const { try { - return my->fork_db.search_on_branch( fork_db_head_block_id(), block_num ); -} FC_CAPTURE_AND_RETHROW( (block_num) ) } +block_state_legacy_ptr controller::fetch_block_state_by_number( uint32_t block_num )const { + try { + // returns nullptr when in IF mode + auto fetch_block_state = [&](auto& fork_db, auto& head) -> block_state_legacy_ptr { + return fork_db.search_on_branch( fork_db.head()->id(), block_num); + }; + return my->block_data.apply_dpos(fetch_block_state); + } FC_CAPTURE_AND_RETHROW( (block_num) ) +} block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try { const auto& blog_head = my->blog.head(); @@ -3280,7 +3883,7 @@ block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try if( !find_in_blog ) { auto bsp = fetch_block_state_by_number( block_num ); - if( bsp ) return bsp->id; + if( bsp ) return bsp->id(); } auto id = my->blog.read_block_id_by_num(block_num); @@ -3380,36 +3983,29 @@ void controller::notify_hs_message( const uint32_t connection_id, const hs_messa my->pacemaker->on_hs_msg(connection_id, msg); }; -const producer_authority_schedule& 
controller::active_producers()const { +const producer_authority_schedule& controller::active_producers()const { if( !(my->pending) ) - return my->head->active_schedule; + return my->block_data.head_active_schedule_auth(); - if( std::holds_alternative(my->pending->_block_stage) ) - return std::get(my->pending->_block_stage)._block_state->active_schedule; - - return my->pending->get_pending_block_header_state_legacy().active_schedule; + return my->pending->active_producers(); } const producer_authority_schedule& controller::pending_producers()const { - if( !(my->pending) ) - return my->head->pending_schedule.schedule; + if( !(my->pending) ) + return my->block_data.head_pending_schedule_auth(); // [greg todo] implement pending_producers correctly for IF mode if( std::holds_alternative(my->pending->_block_stage) ) - return std::get(my->pending->_block_stage)._block_state->pending_schedule.schedule; + return std::get(my->pending->_block_stage).pending_producers(); if( std::holds_alternative(my->pending->_block_stage) ) { - const auto& new_prods_cache = std::get(my->pending->_block_stage)._new_producer_authority_cache; - if( new_prods_cache ) { - return *new_prods_cache; + const auto& pp = std::get(my->pending->_block_stage).pending_producers(); + if( pp ) { + return *pp; } } const auto& bb = std::get(my->pending->_block_stage); - - if( bb._new_pending_producer_schedule ) - return *bb._new_pending_producer_schedule; - - return bb._pending_block_header_state_legacy.prev_pending_schedule.schedule; + return bb.pending_producers(); } std::optional controller::proposed_producers()const { @@ -3589,7 +4185,7 @@ bool controller::is_protocol_feature_activated( const digest_type& feature_diges if( my->pending ) return my->pending->is_protocol_feature_activated( feature_digest ); - const auto& activated_features = my->head->activated_protocol_features->protocol_features; + const auto& activated_features = my->block_data.head_activated_protocol_features()->protocol_features; return (activated_features.find( feature_digest ) != activated_features.end()); } @@ -3768,16 +4364,21 @@ void controller::replace_producer_keys( const public_key_type& key ) { gp.proposed_schedule.version = 0; gp.proposed_schedule.producers.clear(); }); - auto version = my->head->pending_schedule.schedule.version; - my->head->pending_schedule = {}; - my->head->pending_schedule.schedule.version = version; - for (auto& prod: my->head->active_schedule.producers ) { - ilog("${n}", ("n", prod.producer_name)); - std::visit([&](auto &auth) { - auth.threshold = 1; - auth.keys = {key_weight{key, 1}}; - }, prod.authority); - } + + auto replace_keys = [&key](auto& fork_db, auto& head) { + auto version = head->pending_schedule.schedule.version; + head->pending_schedule = {}; + head->pending_schedule.schedule.version = version; + for (auto& prod: head->active_schedule.producers ) { + ilog("${n}", ("n", prod.producer_name)); + std::visit([&](auto &auth) { + auth.threshold = 1; + auth.keys = {key_weight{key, 1}}; + }, prod.authority); + } + }; + + my->block_data.apply_dpos(replace_keys); // [greg todo]: make it work with `apply` instead of `apply_dpos` } void controller::replace_account_keys( name account, name permission, const public_key_type& key ) { @@ -3965,4 +4566,4 @@ void controller_impl::on_activationblock_num) + ("num", bsp->block_num()) ("blk", fc::to_hex(packed_blk)) ); } diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 1446c60f40..7c1c62a2d8 100644 --- a/libraries/chain/fork_database.cpp +++ 
b/libraries/chain/fork_database.cpp @@ -11,19 +11,17 @@ #include #include -namespace eosio { namespace chain { +namespace eosio::chain { using boost::multi_index_container; using namespace boost::multi_index; - const uint32_t fork_database::magic_number = 0x30510FDB; + template + const uint32_t fork_database::magic_number = 0x30510FDB; - const uint32_t fork_database::min_supported_version = 2; - const uint32_t fork_database::max_supported_version = 2; - - // work around block_state_legacy::is_valid being private - inline bool block_state_is_valid( const block_state_legacy& bs ) { - return bs.is_valid(); - } + template + const uint32_t fork_database::min_supported_version = 2; + template + const uint32_t fork_database::max_supported_version = 2; /** * History: @@ -34,92 +32,77 @@ namespace eosio { namespace chain { struct by_block_id; struct by_lib_block_num; struct by_prev; - typedef multi_index_container< - block_state_legacy_ptr, - indexed_by< - hashed_unique< tag, member, std::hash>, - ordered_non_unique< tag, const_mem_fun >, - ordered_unique< tag, - composite_key< block_state_legacy, - global_fun, - // see first_preferred comment - member, - member, - member - >, - composite_key_compare< - std::greater, - std::greater, - std::greater, - sha256_less - > - > - > - > fork_multi_index_type; - - bool first_preferred( const block_header_state_legacy& lhs, const block_header_state_legacy& rhs ) { + + template + bool first_preferred( const bs& lhs, const bs& rhs ) { // dpos_irreversible_blocknum == std::numeric_limits::max() after hotstuff activation // hotstuff block considered preferred over dpos // hotstuff blocks compared by block_num as both lhs & rhs dpos_irreversible_blocknum is max uint32_t // This can be simplified in a future release that assumes hotstuff already activated - return std::tie( lhs.dpos_irreversible_blocknum, lhs.block_num ) - > std::tie( rhs.dpos_irreversible_blocknum, rhs.block_num ); + return std::pair(lhs.irreversible_blocknum(), lhs.block_num()) > std::pair(rhs.irreversible_blocknum(), rhs.block_num()); } + template // either [block_state_legacy_ptr, block_state_ptr], same with block_header_state_ptr struct fork_database_impl { - explicit fork_database_impl( const std::filesystem::path& data_dir ) - :datadir(data_dir) - {} + using bs = bsp::element_type; + using bhs = bhsp::element_type; + + using fork_database_t = fork_database; + using branch_type = fork_database_t::branch_type; + using branch_type_pair = fork_database_t::branch_type_pair; + + using fork_multi_index_type = multi_index_container< + bsp, + indexed_by< + hashed_unique, BOOST_MULTI_INDEX_CONST_MEM_FUN(bs, const block_id_type&, id), std::hash>, + ordered_non_unique, const_mem_fun>, + ordered_unique, + composite_key, + composite_key_compare, std::greater, std::greater, sha256_less>>>>; std::shared_mutex mtx; fork_multi_index_type index; - block_state_legacy_ptr root; // Only uses the block_header_state_legacy portion - block_state_legacy_ptr head; + bsp root; // Only uses the block_header_state_legacy portion + bsp head; std::filesystem::path datadir; - void open_impl( const std::function&, - const vector& )>& validator ); - void close_impl(); - - - block_header_state_legacy_ptr get_block_header_impl( const block_id_type& id )const; - block_state_legacy_ptr get_block_impl( const block_id_type& id )const; - void reset_impl( const block_header_state_legacy& root_bhs ); - void rollback_head_to_root_impl(); - void advance_root_impl( const block_id_type& id ); - void remove_impl( const block_id_type& id ); 
- branch_type fetch_branch_impl( const block_id_type& h, uint32_t trim_after_block_num )const; - block_state_legacy_ptr search_on_branch_impl( const block_id_type& h, uint32_t block_num )const; - pair fetch_branch_from_impl( const block_id_type& first, - const block_id_type& second )const; - void mark_valid_impl( const block_state_legacy_ptr& h ); - - void add_impl( const block_state_legacy_ptr& n, - bool ignore_duplicate, bool validate, - const std::function&, - const vector& )>& validator ); - }; + explicit fork_database_impl( const std::filesystem::path& data_dir ) : datadir(data_dir) {} + + void open_impl( validator_t& validator ); + void close_impl(); + void add_impl( const bsp& n, bool ignore_duplicate, bool validate, validator_t& validator ); + bhsp get_block_header_impl( const block_id_type& id ) const; + bsp get_block_impl( const block_id_type& id ) const; + void reset_impl( const bhs& root_bhs ); + void rollback_head_to_root_impl(); + void advance_root_impl( const block_id_type& id ); + void remove_impl( const block_id_type& id ); + branch_type fetch_branch_impl( const block_id_type& h, uint32_t trim_after_block_num ) const; + bsp search_on_branch_impl( const block_id_type& h, uint32_t block_num ) const; + void mark_valid_impl( const bsp& h ); + branch_type_pair fetch_branch_from_impl( const block_id_type& first, const block_id_type& second ) const; - fork_database::fork_database( const std::filesystem::path& data_dir ) - :my( new fork_database_impl( data_dir ) ) + }; + + template + fork_database::fork_database( const std::filesystem::path& data_dir ) + :my( new fork_database_impl( data_dir ) ) {} - void fork_database::open( const std::function&, - const vector& )>& validator ) - { + template + void fork_database::open( validator_t& validator ) { std::lock_guard g( my->mtx ); my->open_impl( validator ); } - void fork_database_impl::open_impl( const std::function&, - const vector& )>& validator ) - { + template + void fork_database_impl::open_impl( validator_t& validator ) { if (!std::filesystem::is_directory(datadir)) std::filesystem::create_directories(datadir); @@ -134,42 +117,42 @@ namespace eosio { namespace chain { // validate totem uint32_t totem = 0; fc::raw::unpack( ds, totem ); - EOS_ASSERT( totem == fork_database::magic_number, fork_database_exception, + EOS_ASSERT( totem == fork_database_t::magic_number, fork_database_exception, "Fork database file '${filename}' has unexpected magic number: ${actual_totem}. Expected ${expected_totem}", ("filename", fork_db_dat) ("actual_totem", totem) - ("expected_totem", fork_database::magic_number) + ("expected_totem", fork_database_t::magic_number) ); // validate version uint32_t version = 0; fc::raw::unpack( ds, version ); - EOS_ASSERT( version >= fork_database::min_supported_version && version <= fork_database::max_supported_version, + EOS_ASSERT( version >= fork_database_t::min_supported_version && version <= fork_database_t::max_supported_version, fork_database_exception, "Unsupported version of fork database file '${filename}'. 
" "Fork database version is ${version} while code supports version(s) [${min},${max}]", ("filename", fork_db_dat) ("version", version) - ("min", fork_database::min_supported_version) - ("max", fork_database::max_supported_version) + ("min", fork_database_t::min_supported_version) + ("max", fork_database_t::max_supported_version) ); - block_header_state_legacy bhs; - fc::raw::unpack( ds, bhs ); - reset_impl( bhs ); + bhs state; + fc::raw::unpack( ds, state ); + reset_impl( state ); unsigned_int size; fc::raw::unpack( ds, size ); for( uint32_t i = 0, n = size.value; i < n; ++i ) { - block_state_legacy s; + bs s; fc::raw::unpack( ds, s ); // do not populate transaction_metadatas, they will be created as needed in apply_block with appropriate key recovery s.header_exts = s.block->validate_and_extract_header_extensions(); - add_impl( std::make_shared( std::move( s ) ), false, true, validator ); + add_impl( std::make_shared( std::move( s ) ), false, true, validator ); } block_id_type head_id; fc::raw::unpack( ds, head_id ); - if( root->id == head_id ) { + if( root->id() == head_id ) { head = root; } else { head = get_block_impl( head_id ); @@ -178,9 +161,9 @@ namespace eosio { namespace chain { ("filename", fork_db_dat) ); } - auto candidate = index.get().begin(); - if( candidate == index.get().end() || !(*candidate)->is_valid() ) { - EOS_ASSERT( head->id == root->id, fork_database_exception, + auto candidate = index.template get().begin(); + if( candidate == index.template get().end() || !(*candidate)->is_valid() ) { + EOS_ASSERT( head->id() == root->id(), fork_database_exception, "head not set to root despite no better option available; '${filename}' is likely corrupted", ("filename", fork_db_dat) ); } else { @@ -194,12 +177,14 @@ namespace eosio { namespace chain { } } - void fork_database::close() { + template + void fork_database::close() { std::lock_guard g( my->mtx ); my->close_impl(); } - void fork_database_impl::close_impl() { + template + void fork_database_impl::close_impl() { auto fork_db_dat = datadir / config::forkdb_filename; if( !root ) { @@ -210,14 +195,18 @@ namespace eosio { namespace chain { return; } + // [greg todo] we need support for writing both the old and new format of fork_db to disk. + // I think it would be easier to have a different magic number for the new format (rather than a different + // version), since we do not need to be able to load a fork_db which is meant for a different + // consensus (dpos vs if). std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | std::ofstream::trunc ); - fc::raw::pack( out, fork_database::magic_number ); - fc::raw::pack( out, fork_database::max_supported_version ); // write out current version which is always max_supported_version - fc::raw::pack( out, *static_cast(&*root) ); + fc::raw::pack( out, fork_database_t::magic_number ); + fc::raw::pack( out, fork_database_t::max_supported_version ); // write out current version which is always max_supported_version + fc::raw::pack( out, *static_cast(&*root) ); // [greg todo] enought to write only bhs for IF? 
uint32_t num_blocks_in_fork_db = index.size(); fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} ); - const auto& indx = index.get(); + const auto& indx = index.template get(); auto unvalidated_itr = indx.rbegin(); auto unvalidated_end = boost::make_reverse_iterator( indx.lower_bound( false ) ); @@ -253,7 +242,7 @@ namespace eosio { namespace chain { } if( head ) { - fc::raw::pack( out, head->id ); + fc::raw::pack( out, head->id() ); } else { elog( "head not set in fork database; '${filename}' will be corrupted", ("filename", fork_db_dat) ); @@ -262,46 +251,53 @@ namespace eosio { namespace chain { index.clear(); } - fork_database::~fork_database() { + template + fork_database::~fork_database() { my->close_impl(); } - void fork_database::reset( const block_header_state_legacy& root_bhs ) { + template + void fork_database::reset( const bhs& root_bhs ) { std::lock_guard g( my->mtx ); my->reset_impl(root_bhs); } - void fork_database_impl::reset_impl( const block_header_state_legacy& root_bhs ) { + template + void fork_database_impl::reset_impl( const bhs& root_bhs ) { index.clear(); - root = std::make_shared(); - static_cast(*root) = root_bhs; - root->validated = true; + root = std::make_shared(); + static_cast(*root) = root_bhs; + root->set_valid(true); head = root; } - void fork_database::rollback_head_to_root() { + template + void fork_database::rollback_head_to_root() { std::lock_guard g( my->mtx ); my->rollback_head_to_root_impl(); } - void fork_database_impl::rollback_head_to_root_impl() { - auto& by_id_idx = index.get(); + template + void fork_database_impl::rollback_head_to_root_impl() { + auto& by_id_idx = index.template get(); auto itr = by_id_idx.begin(); while (itr != by_id_idx.end()) { - by_id_idx.modify( itr, [&]( block_state_legacy_ptr& bsp ) { - bsp->validated = false; + by_id_idx.modify( itr, []( bsp& _bsp ) { + _bsp->set_valid(false); } ); ++itr; } head = root; } - void fork_database::advance_root( const block_id_type& id ) { + template + void fork_database::advance_root( const block_id_type& id ) { std::lock_guard g( my->mtx ); my->advance_root_impl( id ); } - void fork_database_impl::advance_root_impl( const block_id_type& id ) { + template + void fork_database_impl::advance_root_impl( const block_id_type& id ) { EOS_ASSERT( root, fork_database_exception, "root not yet set" ); auto new_root = get_block_impl( id ); @@ -313,9 +309,9 @@ namespace eosio { namespace chain { deque blocks_to_remove; for( auto b = new_root; b; ) { - blocks_to_remove.emplace_back( b->header.previous ); + blocks_to_remove.emplace_back( b->previous() ); b = get_block_impl( blocks_to_remove.back() ); - EOS_ASSERT( b || blocks_to_remove.back() == root->id, fork_database_exception, "invariant violation: orphaned branch was present in forked database" ); + EOS_ASSERT( b || blocks_to_remove.back() == root->id(), fork_database_exception, "invariant violation: orphaned branch was present in forked database" ); } // The new root block should be erased from the fork database index individually rather than with the remove method, @@ -334,13 +330,15 @@ namespace eosio { namespace chain { root = new_root; } - block_header_state_legacy_ptr fork_database::get_block_header( const block_id_type& id )const { + template + bhsp fork_database::get_block_header( const block_id_type& id ) const { std::shared_lock g( my->mtx ); return my->get_block_header_impl( id ); } - block_header_state_legacy_ptr fork_database_impl::get_block_header_impl( const block_id_type& id )const { - if( root->id == id ) { + template + bhsp 
fork_database_impl::get_block_header_impl( const block_id_type& id ) const { + if( root->id() == id ) { return root; } @@ -348,22 +346,18 @@ namespace eosio { namespace chain { if( itr != index.end() ) return *itr; - return block_header_state_legacy_ptr(); + return bhsp(); } - void fork_database_impl::add_impl( const block_state_legacy_ptr& n, - bool ignore_duplicate, bool validate, - const std::function&, - const vector& )>& validator ) - { + template + void fork_database_impl::add_impl(const bsp& n, bool ignore_duplicate, bool validate, validator_t& validator) { EOS_ASSERT( root, fork_database_exception, "root not yet set" ); EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); - auto prev_bh = get_block_header_impl( n->header.previous ); + auto prev_bh = get_block_header_impl( n->previous() ); EOS_ASSERT( prev_bh, unlinkable_block_exception, - "unlinkable block", ("id", n->id)("previous", n->header.previous) ); + "unlinkable block", ("id", n->id())("previous", n->previous()) ); if( validate ) { try { @@ -371,7 +365,7 @@ namespace eosio { namespace chain { if( exts.count(protocol_feature_activation::extension_id()) > 0 ) { const auto& new_protocol_features = std::get(exts.lower_bound(protocol_feature_activation::extension_id())->second).protocol_features; - validator( n->header.timestamp, prev_bh->activated_protocol_features->protocol_features, new_protocol_features ); + validator( n->timestamp(), static_cast(prev_bh.get())->get_activated_protocol_features()->protocol_features, new_protocol_features ); } } EOS_RETHROW_EXCEPTIONS( fork_database_exception, "serialized fork database is incompatible with configured protocol features" ) } @@ -379,16 +373,17 @@ namespace eosio { namespace chain { auto inserted = index.insert(n); if( !inserted.second ) { if( ignore_duplicate ) return; - EOS_THROW( fork_database_exception, "duplicate block added", ("id", n->id) ); + EOS_THROW( fork_database_exception, "duplicate block added", ("id", n->id()) ); } - auto candidate = index.get().begin(); + auto candidate = index.template get().begin(); if( (*candidate)->is_valid() ) { head = *candidate; } } - void fork_database::add( const block_state_legacy_ptr& n, bool ignore_duplicate ) { + template + void fork_database::add( const bsp& n, bool ignore_duplicate ) { std::lock_guard g( my->mtx ); my->add_impl( n, ignore_duplicate, false, []( block_timestamp_type timestamp, @@ -398,19 +393,22 @@ namespace eosio { namespace chain { ); } - block_state_legacy_ptr fork_database::root()const { + template + bsp fork_database::root() const { std::shared_lock g( my->mtx ); return my->root; } - block_state_legacy_ptr fork_database::head()const { + template + bsp fork_database::head() const { std::shared_lock g( my->mtx ); return my->head; } - block_state_legacy_ptr fork_database::pending_head()const { + template + bsp fork_database::pending_head() const { std::shared_lock g( my->mtx ); - const auto& indx = my->index.get(); + const auto& indx = my->index.template get(); auto itr = indx.lower_bound( false ); if( itr != indx.end() && !(*itr)->is_valid() ) { @@ -421,29 +419,36 @@ namespace eosio { namespace chain { return my->head; } - branch_type fork_database::fetch_branch( const block_id_type& h, uint32_t trim_after_block_num )const { - std::shared_lock g( my->mtx ); - return my->fetch_branch_impl( h, trim_after_block_num ); + template + fork_database::branch_type + fork_database::fetch_branch(const block_id_type& h, + uint32_t trim_after_block_num) const { + std::shared_lock g(my->mtx); + return 
my->fetch_branch_impl(h, trim_after_block_num); } - branch_type fork_database_impl::fetch_branch_impl( const block_id_type& h, uint32_t trim_after_block_num )const { + template + fork_database::branch_type + fork_database_impl::fetch_branch_impl(const block_id_type& h, uint32_t trim_after_block_num) const { branch_type result; - for( auto s = get_block_impl(h); s; s = get_block_impl( s->header.previous ) ) { - if( s->block_num <= trim_after_block_num ) - result.push_back( s ); + for (auto s = get_block_impl(h); s; s = get_block_impl(s->previous())) { + if (s->block_num() <= trim_after_block_num) + result.push_back(s); } return result; } - block_state_legacy_ptr fork_database::search_on_branch( const block_id_type& h, uint32_t block_num )const { + template + bsp fork_database::search_on_branch( const block_id_type& h, uint32_t block_num ) const { std::shared_lock g( my->mtx ); return my->search_on_branch_impl( h, block_num ); } - block_state_legacy_ptr fork_database_impl::search_on_branch_impl( const block_id_type& h, uint32_t block_num )const { - for( auto s = get_block_impl(h); s; s = get_block_impl( s->header.previous ) ) { - if( s->block_num == block_num ) + template + bsp fork_database_impl::search_on_branch_impl( const block_id_type& h, uint32_t block_num ) const { + for( auto s = get_block_impl(h); s; s = get_block_impl( s->previous() ) ) { + if( s->block_num() == block_num ) return s; } @@ -454,52 +459,54 @@ namespace eosio { namespace chain { * Given two head blocks, return two branches of the fork graph that * end with a common ancestor (same prior block) */ - pair< branch_type, branch_type > fork_database::fetch_branch_from( const block_id_type& first, - const block_id_type& second )const { - std::shared_lock g( my->mtx ); - return my->fetch_branch_from_impl( first, second ); + template + fork_database::branch_type_pair + fork_database::fetch_branch_from(const block_id_type& first, const block_id_type& second) const { + std::shared_lock g(my->mtx); + return my->fetch_branch_from_impl(first, second); } - pair< branch_type, branch_type > fork_database_impl::fetch_branch_from_impl( const block_id_type& first, - const block_id_type& second )const { - pair result; - auto first_branch = (first == root->id) ? root : get_block_impl(first); - auto second_branch = (second == root->id) ? root : get_block_impl(second); + template + fork_database::branch_type_pair + fork_database_impl::fetch_branch_from_impl(const block_id_type& first, const block_id_type& second) const { + pair result; + auto first_branch = (first == root->id()) ? root : get_block_impl(first); + auto second_branch = (second == root->id()) ? root : get_block_impl(second); EOS_ASSERT(first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", first)); EOS_ASSERT(second_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", second)); - while( first_branch->block_num > second_branch->block_num ) + while( first_branch->block_num() > second_branch->block_num() ) { result.first.push_back(first_branch); - const auto& prev = first_branch->header.previous; - first_branch = (prev == root->id) ? root : get_block_impl( prev ); + const auto& prev = first_branch->previous(); + first_branch = (prev == root->id()) ? 
root : get_block_impl( prev ); EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", prev) ); } - while( second_branch->block_num > first_branch->block_num ) + while( second_branch->block_num() > first_branch->block_num() ) { result.second.push_back( second_branch ); - const auto& prev = second_branch->header.previous; - second_branch = (prev == root->id) ? root : get_block_impl( prev ); + const auto& prev = second_branch->previous(); + second_branch = (prev == root->id()) ? root : get_block_impl( prev ); EOS_ASSERT( second_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", prev) ); } - if (first_branch->id == second_branch->id) return result; + if (first_branch->id() == second_branch->id()) return result; - while( first_branch->header.previous != second_branch->header.previous ) + while( first_branch->previous() != second_branch->previous() ) { result.first.push_back(first_branch); result.second.push_back(second_branch); - const auto &first_prev = first_branch->header.previous; + const auto &first_prev = first_branch->previous(); first_branch = get_block_impl( first_prev ); - const auto &second_prev = second_branch->header.previous; + const auto &second_prev = second_branch->previous(); second_branch = get_block_impl( second_prev ); EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", @@ -520,23 +527,25 @@ namespace eosio { namespace chain { } /// fetch_branch_from_impl /// remove all of the invalid forks built off of this id including this id - void fork_database::remove( const block_id_type& id ) { + template + void fork_database::remove( const block_id_type& id ) { std::lock_guard g( my->mtx ); return my->remove_impl( id ); } - void fork_database_impl::remove_impl( const block_id_type& id ) { + template + void fork_database_impl::remove_impl( const block_id_type& id ) { deque remove_queue{id}; - const auto& previdx = index.get(); - const auto& head_id = head->id; + const auto& previdx = index.template get(); + const auto& head_id = head->id(); for( uint32_t i = 0; i < remove_queue.size(); ++i ) { EOS_ASSERT( remove_queue[i] != head_id, fork_database_exception, "removing the block and its descendants would remove the current head block" ); auto previtr = previdx.lower_bound( remove_queue[i] ); - while( previtr != previdx.end() && (*previtr)->header.previous == remove_queue[i] ) { - remove_queue.emplace_back( (*previtr)->id ); + while( previtr != previdx.end() && (*previtr)->previous() == remove_queue[i] ) { + remove_queue.emplace_back( (*previtr)->id() ); ++previtr; } } @@ -546,41 +555,52 @@ namespace eosio { namespace chain { } } - void fork_database::mark_valid( const block_state_legacy_ptr& h ) { + template + void fork_database::mark_valid( const bsp& h ) { std::lock_guard g( my->mtx ); my->mark_valid_impl( h ); } - void fork_database_impl::mark_valid_impl( const block_state_legacy_ptr& h ) { - if( h->validated ) return; + template + void fork_database_impl::mark_valid_impl( const bsp& h ) { + if( h->is_valid() ) return; - auto& by_id_idx = index.get(); + auto& by_id_idx = index.template get(); - auto itr = by_id_idx.find( h->id ); + auto itr = by_id_idx.find( h->id() ); EOS_ASSERT( itr != by_id_idx.end(), fork_database_exception, "block state not in fork database; cannot mark as valid", - ("id", h->id) ); + ("id", h->id()) ); - by_id_idx.modify( itr, []( block_state_legacy_ptr& bsp ) { - bsp->validated = true; + by_id_idx.modify( itr, []( bsp& _bsp ) { + _bsp->set_valid(true); } ); - auto candidate = 
index.get().begin(); + auto candidate = index.template get().begin(); if( first_preferred( **candidate, *head ) ) { head = *candidate; } } - block_state_legacy_ptr fork_database::get_block(const block_id_type& id)const { + template + bsp fork_database::get_block(const block_id_type& id) const { std::shared_lock g( my->mtx ); return my->get_block_impl(id); } - block_state_legacy_ptr fork_database_impl::get_block_impl(const block_id_type& id)const { + template + bsp fork_database_impl::get_block_impl(const block_id_type& id) const { auto itr = index.find( id ); if( itr != index.end() ) return *itr; - return block_state_legacy_ptr(); + return bsp(); } -} } /// eosio::chain + // do class instantiations + template class fork_database; + template class fork_database; + + template struct fork_database_impl; + template struct fork_database_impl; + +} /// eosio::chain diff --git a/libraries/chain/hotstuff/block_construction_data_flow.md b/libraries/chain/hotstuff/block_construction_data_flow.md new file mode 100644 index 0000000000..a9400a8ca0 --- /dev/null +++ b/libraries/chain/hotstuff/block_construction_data_flow.md @@ -0,0 +1,229 @@ +Below, `parent` refers to the `block_state` of the parent block from which a new block is being constructed. + +## dpos data + +currently in controller.cpp, we have the `building_block` whose members are: + +```c++ +struct building_block { + pending_block_header_state _pending_block_header_state; // IF: Remove from building_block. See below for replacements. + std::optional _new_pending_producer_schedule; // IF: Replaced by new_proposal_policy. + vector _new_protocol_feature_activations; // IF: Comes from building_block_input::new_protocol_feature_activations + size_t _num_new_protocol_features_that_have_activated = 0; // Stays only in building_block + deque _pending_trx_metas; // Moved from building_block to assembled_block + deque _pending_trx_receipts; // Moved from building_block to the transactions in the constructed block + std::variant _trx_mroot_or_receipt_digests; // IF: Extract computed trx mroot to assembled_block_input::transaction_mroot + digests_t _action_receipt_digests; // IF: Extract computed action mroot to assembled_block_input::action_mroot +}; +``` + +the `assembled_block`: + + +```c++ +struct assembled_block { + block_id_type _id; // Cache of _unsigned_block->calculate_id(). + pending_block_header_state _pending_block_header_state; // IF: Remove from assembled_block. See below for replacements. + deque _trx_metas; // Comes from building_block::_pending_trx_metas + // Carried over to put into block_state (optimization for fork reorgs) + signed_block_ptr _unsigned_block; // IF: keep same member + + // if the _unsigned_block pre-dates block-signing authorities this may be present. + std::optional _new_producer_authority_cache; // IF: Remove from assembled_block + // pending_producers() not needed in IF. proposed_proposers() sufficient. +}; +``` + +and the `pending_block_header_state`: + +```c++ + +struct block_header_state_legacy_common { + uint32_t block_num = 0; // IF: block_header::num_from_id(parent_id) + 1 + uint32_t dpos_proposed_irreversible_blocknum = 0; // Unneeded for IF + uint32_t dpos_irreversible_blocknum = 0; // Unneeded during the building block stage for IF + producer_authority_schedule active_schedule; // IF: Replaced by active_proposer_policy stored in building_block. 
+ incremental_merkle blockroot_merkle; // Unneeded during the building block stage for IF + flat_map producer_to_last_produced; // Unneeded for IF + flat_map producer_to_last_implied_irb; // Unneeded for IF + block_signing_authority valid_block_signing_authority; // IF: Get from within active_proposer_policy for building_block.producer. + vector confirm_count; // Unneeded for IF +}; + +struct pending_block_header_state : public detail::block_header_state_legacy_common { + protocol_feature_activation_set_ptr prev_activated_protocol_features; // IF: building_block.prev_activated_protocol_features + detail::schedule_info prev_pending_schedule; // Unneeded for IF + bool was_pending_promoted = false; // Unneeded for IF + block_id_type previous; // Not needed but present anyway at building_block.parent_id + account_name producer; // IF: building_block.producer + block_timestamp_type timestamp; // IF: building_block.timestamp + uint32_t active_schedule_version = 0; // Unneeded for IF + uint16_t confirmed = 1; // Unneeded for IF +}; +``` + +and all this lives in `pending_state` which I believe can stay unchanged. + +## IF data + +The new storage for IF is: + +```c++ +struct block_header_state_core { + uint32_t last_final_block_num = 0; // last irreversible (final) block. + std::optional final_on_strong_qc_block_num; // will become final if this header achives a strong QC. + std::optional last_qc_block_num; // + uint32_t finalizer_policy_generation; + + block_header_state_core next(uint32_t last_qc_block_num, bool is_last_qc_strong) const; +}; + +struct quorum_certificate { + uint32_t block_num; + valid_quorum_certificate qc; +}; + +struct block_header_state { + block_header header; + protocol_feature_activation_set_ptr activated_protocol_features; + block_header_state_core core; + incremental_merkle_tree proposal_mtree; + incremental_merkle_tree finality_mtree; + finalizer_policy_ptr active_finalizer_policy; // finalizer set + threshold + generation, supports `digest()` + proposer_policy_ptr active_proposer_policy; // producer authority schedule, supports `digest()` + + flat_map proposer_policies; + flat_map finalizer_policies; + + digest_type compute_finalizer_digest() const; + + proposer_policy_ptr get_next_active_proposer_policy(block_timestamp_type next_timestamp) const { + // Find latest proposer policy within proposer_policies that has an active_time <= next_timestamp. + // If found, return the proposer policy that was found. + // Otherwise, return active_proposer_policy. + } + + block_timestamp_type timestamp() const { return header.timestamp; } + account_name producer() const { return header.producer; } + block_id_type previous() const { return header.previous; } + uint32_t block_num() const { return block_header::num_from_id(previous()) + 1; } + + // block descending from this need the provided qc in the block extension + bool is_needed(const quorum_certificate& qc) const { + return !core.last_qc_block_num || qc.block_num > *core.last_qc_block_num; + } + + block_header_state next(const block_header_state_input& data) const; +}; + +struct block_state { + const block_header_state bhs; + const signed_block_ptr block; + + const block_id_type id; // cache of bhs.header.calculate_id() (indexed on this field) + const digest_type finalizer_digest; // cache of bhs.compute_finalizer_digest() + + std::optional pending_qc; + std::optional valid_qc; + + std::optional get_best_qc() const { + // If pending_qc does not have a valid QC, return valid_qc. 
+ // Otherwise, extract the valid QC from *pending_qc. + // Compare that to valid_qc to determine which is better: Strong beats Weak. Break tie with highest accumulated weight. + // Return the better one. + } + + uint64_t block_num() const { return block_header::num_from_id(id); } +}; + +``` + +In addition, in IF `pending_state._block_stage` will still contain the three stages: `building_block`, `assembled_block`, and `completed_block`. + +1. `building_block`: + +```c++ +struct building_block { + const block_id_type parent_id; // Comes from building_block_input::parent_id + const block_timestamp_type timestamp; // Comes from building_block_input::timestamp + const account_name producer; // Comes from building_block_input::producer + const vector new_protocol_feature_activations; // Comes from building_block_input::new_protocol_feature_activations + const protocol_feature_activation_set_ptr prev_activated_protocol_features; // Cached: parent.bhs.activated_protocol_features + const proposer_policy_ptr active_proposer_policy; // Cached: parent.bhs.get_next_active_proposer_policy(timestamp) + + // Members below start from initial state and are mutated as the block is built. + size_t num_new_protocol_features_that_have_activated = 0; + std::optional new_proposer_policy; + std::optional new_finalizer_policy; + deque pending_trx_metas; + deque pending_trx_receipts; + std::variant trx_mroot_or_receipt_digests; + digests_t action_receipt_digests; +}; +``` + +``` +struct building_block { + pending_block_header_state _pending_block_header_state; // IF: Remove from building_block. See below for replacements. + std::optional _new_pending_producer_schedule; // IF: Replaced by new_proposal_policy. + vector _new_protocol_feature_activations; // IF: Comes from building_block_input::new_protocol_feature_activations + size_t _num_new_protocol_features_that_have_activated = 0; // Stays only in building_block + deque _pending_trx_metas; // Moved from building_block to assembled_block + deque _pending_trx_receipts; // Moved from building_block to the transactions in the constructed block + std::variant _trx_mroot_or_receipt_digests; // IF: Extract computed trx mroot to assembled_block_input::transaction_mroot + digests_t _action_receipt_digests; // IF: Extract computed action mroot to assembled_block_input::action_mroot +}; +``` + +which is constructed from: + +```c++ +struct building_block_input { + block_id_type parent_id; + block_timestamp_type timestamp; + account_name producer; + vector new_protocol_feature_activations; +}; +``` + +When done with building the block, from `building_block` we can extract: + +```c++ + +struct block_header_state_input : public building_block_input { + digest_type transaction_mroot; // Comes from std::get(building_block::trx_mroot_or_receipt_digests) + digest_type action_mroot; // Compute root from building_block::action_receipt_digests + std::optional new_proposer_policy; // Comes from building_block::new_proposer_policy + std::optional new_finalizer_policy; // Comes from building_block::new_finalizer_policy + std::optional qc; // Comes from traversing branch from parent and calling get_best_qc() + // assert(qc->block_num <= num_from_id(previous)); + // ... ? +}; +``` + +which is the input needed to `block_header_state::next` to compute the new block header state. + +2. 
`assembled_block`: + + +```c++ +struct assembled_block { + block_header_state new_block_header_state; + deque trx_metas; // Comes from building_block::pending_trx_metas + // Carried over to put into block_state (optimization for fork reorgs) + deque trx_receipts; // Comes from building_block::pending_trx_receipts + std::optional qc; // QC to add as block extension to new block +}; +``` + +which is constructed from `building_block` and `parent.bhs`. + +3. `completed_block`: + +```c++ +struct completed_block { + block_state_ptr block_state; +}; +``` + +which is constructed from `assembled_block` and a block header signature provider. \ No newline at end of file diff --git a/libraries/chain/hotstuff/chain_pacemaker.cpp b/libraries/chain/hotstuff/chain_pacemaker.cpp index 4e416202e3..2d64185a53 100644 --- a/libraries/chain/hotstuff/chain_pacemaker.cpp +++ b/libraries/chain/hotstuff/chain_pacemaker.cpp @@ -118,7 +118,8 @@ namespace eosio::chain { const auto& [ block, id ] = t; on_irreversible_block( block ); } ); - _head_block_state = chain->head_block_state(); + // TODO: assuming this will be going away + _head_block_state = chain->head_block_state_legacy(); } void chain_pacemaker::register_bcast_function(std::function&, const hs_message&)> broadcast_hs_message) { @@ -161,7 +162,8 @@ namespace eosio::chain { // called from main thread void chain_pacemaker::on_accepted_block( const signed_block_ptr& block ) { std::scoped_lock g( _chain_state_mutex ); - _head_block_state = _chain->fetch_block_state_by_number(block->block_num()); + // TODO: assume this is going away + _head_block_state = _chain->head_block_state_legacy(); } // called from main thread @@ -211,7 +213,7 @@ namespace eosio::chain { block_id_type chain_pacemaker::get_current_block_id() { std::scoped_lock g( _chain_state_mutex ); - return _head_block_state->id; + return _head_block_state->id(); } uint32_t chain_pacemaker::get_quorum_threshold() { diff --git a/libraries/chain/hotstuff/hs_pseudo b/libraries/chain/hotstuff/hs_pseudo new file mode 100644 index 0000000000..d39d4dfcd9 --- /dev/null +++ b/libraries/chain/hotstuff/hs_pseudo @@ -0,0 +1,432 @@ +//notes : under this pseudo code, the hotstuff information is mapped to Antelope concepts : +b_leaf (becomes) -> block_header_state.id //block_state pointer to head + (`head->bhs.id`) + +b_lock (becomes) -> finalizer_safety_information.locked_block_ref + (`block_id_type` of the proposal we voted on and are locked to) + +b_exec (becomes) -> block proposal refered to by block_header_state_core.last_final_block_height //head->last_final_block_height + (`head->bhs.core.last_final_block_height`) + +v_height (becomes) -> finalizer_safety_information.last_vote_block_ref + (`block_id_type` of the last proposal we voted on) + +high_qc (becomes) -> block proposal refered to by block_header_state_core.last_qc_block_height + (fork_db.get_block_by_height(head->bhs.id, head->bhs.core.last_qc_block_height).get_best_qc()) + maybe add new index in fork_db? 
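   (illustrative only: the extra index could be one more ordering in the fork_db multi_index keyed on
    block height, e.g. ordered_non_unique< tag<by_block_num>, const_mem_fun<block_state, uint64_t, &block_state::get_height> >,
    letting get_block_by_height() seek by height first and then verify the hit lies on the requested branch)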
+ +proposal_store is now fork_db + + + +//structures + +struct finalizer_authority { + bls_public_key key; + weight uint32_t; +} + +struct finalizer_policy { + finalizer_authority[] finalizers; + uint32_t weight_quorum_threshold; +} + +struct finalizer_safety_information{ + uint32_t last_vote_range_lower_bound; + uint32_t last_vote_range_upper_bound; + sha256 last_vote_block_ref; //v_height under hotstuff + sha256 locked_block_ref; //b_lock under hotstuff + bool is_last_vote_strong; + bool recovery_mode; //todo : discuss +} + +struct fork_db { + block_handle get_block_by_id(block_id_type id){ [...] //get block by id} + block_handle get_block_by_finalizer_digest(sha256 digest){ [...] //get block by finalizer digest} + block_handle get_block_by_height(block_id_type branch, uint32_t last_qc_block_height){ [...] //on a given branch, get block by height} + block_handle get_head_block(){ [...] //get the head block on the branch I'm looking to extend } +} + +struct block_header_state_core { + uint32_t last_final_block_height; //b_exec under hotstuff + std::optional final_on_strong_qc_block_height; + std::optional last_qc_block_height; //high_qc under hotstuff + + block_header_state_core next(uint32_t last_qc_block_height, bool is_last_qc_strong){ + // no state change if last_qc_block_height is the same + if( last_qc_block_height == this->last_qc_block_height ) { + return {*this}; + } + EOS_ASSERT( last_qc_block_height > this->last_qc_block_height, block_validate_exception, + "new last_qc_block_height must be greater than old last_qc_block_height" ); + auto old_last_qc_block_height = this->last_qc_block_height; + auto old_final_on_strong_qc_block_height = this->final_on_strong_qc_block_height; + block_header_state_core result{*this}; + if( is_last_qc_strong ) { + // last QC is strong. We can progress forward. + // block with old final_on_strong_qc_block_height becomes irreversible + if( old_final_on_strong_qc_block_height.has_value() ) { + //old commit / fork_db.log_irreversible() + result.last_final_block_height = *old_final_on_strong_qc_block_height; + } + // next block which can become irreversible is the block with + // old last_qc_block_height + if( old_last_qc_block_height.has_value() ) { + result.final_on_strong_qc_block_height = *old_last_qc_block_height; + } + } else { + // new final_on_strong_qc_block_height should not be present + result.final_on_strong_qc_block_height.reset(); + // new last_final_block_height should be the same as the old last_final_block_height + } + // new last_qc_block_height is always the input last_qc_block_height. + result.last_qc_block_height = last_qc_block_height; + return result; + } +} + +struct building_block_input { + block_id_type previous; + block_timestamp_type timestamp; + account_name producer; + vector new_protocol_feature_activations; +}; + +// this struct can be extracted from a building block +struct assembled_block_input : public building_block_input { + digest_type transaction_mroot; + digest_type action_mroot; + std::optional new_proposer_policy; + std::optional new_finalizer_policy; + std::optional qc; // assert(qc.block_height <= num_from_id(previous)); +}; + +struct block_header_state { + + //existing block_header_state members + + sha256 id; //b_leaf under hotstuff + + [...] 
//other existing block_header_state members + + protocol_feature_activation_set_ptr activated_protocol_features; + + //new additions + + block_header_state_core core; + incremental_block_mtree proposal_mtree; + incremental_block_mtree finality_mtree; + + finalizer_policy_ptr finalizer_policy; // finalizer set + threshold + generation, supports `digest()` + proposer_policy_ptr proposer_policy; // producer authority schedule, supports `digest()` + + flat_map proposer_policies; + flat_map finalizer_policies; + + + block_header_state next(const assembled_block_input& data) const { + } + + sha256 compute_finalizer_digest() const { + } +} + +//shared pointer to a block_state +struct block_handle { + block_state_ptr _handle; +} + +struct block_state { + sha256 finalizer_digest; + block_header_state_ptr bhs; + finalizer_policy_ptr active_fp; + std::optional pending_qc; + std::optional valid_qc; + + block_id_type id() const { return bhs->id;} + uint64_t get_height() const { return block_header::num_from_id(bhs->id);} + quorum_certificate get_best_qc() { [...] //return the best QC available } + +} + +//this structure holds the required information and methods for the Hotstuff algorithm. It is derived from a block and block_header content, notably extensions +struct hs_proposal { + //may not exist in final implementation, subject to change + block_id_type block_id; //computed, to be replaced with proposal_digest eventually + uint32_t get_height(); //from block_id + block_timestamp_type timestamp; //from block header + //qc specific information + uint32_t last_qc_block_height; //from block header extension + bool is_last_qc_strong; //from block header extension + valid_quorum_certificate qc; //from block extension +}; + +struct valid_quorum_certificate { + hs_bitset strong_bitset; + optional weak_bitset; //omitted if strong qc + bls_signature signature; //set to strong_signature if strong qc, set to strong_signature + weak_signature if weak qc + + //constructor used for strong qc + valid_quorum_certificate(hs_bitset b, bls_signature s) : + strong_bitset(b), + signature(s) {} + + //constructor used for weak qc + valid_quorum_certificate(hs_bitset sb, hs_bitset wb, bls_signature s) : + strong_bitset(sb), + weak_bitset(wb), + signature(s) {} + + bool is_strong() {if (weak_bitset.has_value()) return false; else return true; } +} + +struct pending_quorum_certificate { + hs_bitset strong_bitset; + bls_signature strong_signature; + hs_bitset weak_bitset; + bls_signature weak_signature; + + bool strong_quorum_met() [...] //abstracted, returns true if a strong quorum is met, false otherwise + bool weak_quorum_met()[...] //abstracted, returns true if a weak quorum is met, false otherwise +} + +struct quorum_certificate { + uint32_t block_height; + valid_quorum_certificate qc; +} + +struct hs_vote_message { + block_id_type block_id; //temporary, probably not needed later + sha256 proposal_digest; //proposal digest + bls_public_key finalizer_key; + bls_signature sig; + bool weak; //indicate if vote is weak, strong otherwise +}; + + +//added as a block_header extension before signing +struct hotstuff_header_extension { + uint32_t last_qc_block_height; + bool is_last_qc_strong; + + std::optional new_finalizer_policy; + std::optional new_proposer_policy; +} + +//added as a block extension before broadcast +struct hotstuff_block_extension { + valid_quorum_certificate qc; +} + +struct signed_block { + [...] 
//existing signed_block members +} + +//helper functions + +//not currently used +sha256 get_proposal_digest(block_header_state bhs, signed_block p, bool weak){ + //provide a proposal digest with sufficient commitments for a light client to construct proofs of finality and inclusion + //todo : determine require commitments and complete digest function + //note : interface is probably too wide, but serves to illustrate that the proposal digest is generated from elements from the state and elements from the signed block + //temporary implementation (insufficient for IBC but sufficient for internal Hotstuff) + sha256 digest = p.block_id; + if (weak) digest = hash(digest, "_WEAK"); //if weak is set to true, concatenate desambiguator + return digest; +} + +// +hotstuff_header_extension construct_hotstuff_header_extension(quorum_certificate qc, std::optional new_finalizer_policy, std::optional new_proposer_policy){ + return {qc.block_height, qc.is_strong(), new_finalizer_policy, new_proposer_policy}; + +} + +hotstuff_block_extension construct_hotstuff_block_extension(quorum_certificate qc){ + return {qc.qc}; +} + +//get finalizer info from storage, loaded on start, held in cache afterwards +void get_finalizer_info(bls_public_key key){ + [...] //abstracted, must get or create the finalizer safety info state for the given finalizer key +} + +//write the finalizer info to disk to prevent accidental double-signing in case of crash + recovery +void save_finalizer_info(bls_public_key key, finalizer_safety_information fsi){ + [...] //abstracted, must save the finalizer info associated to the key, and throw an exception / prevent additional signing if the write operation fails (?) +} + +bool extends(hs_proposal descendant, hs_proposal ancestor){ + [...] //abstracted, returns true if ancestor is a parent of descendant, false otherwise +} + +void update_pending_qc(hs_vote_message v, block_handle& bc){ + if (bc.valid_qc.has_value()) return; //can only update a pending qc + pending_quorum_certificate pqc = bc.pending_qc.value(); + + //update the current pending_quorum_certificate with new vote information + [...] //abstracted + +} + +hs_proposal extract_proposal(signed_block sb, block_handle& bc){ + hs_proposal p; + [...] //abstracted, see hs_proposal for how to retrieve the values + return p; +} + +enum VoteDecision { + StrongVote, + WeakVote, + NoVote +} + +VoteDecision decide_vote(finalizer_safety_information& fsi, block_handle p){ + + bool monotony_check = false; + bool safety_check = false; + bool liveness_check = false; + + b_phases = get_qc_chain(p); + b2 = b_phases[2] //first phase, prepare + b1 = b_phases[1] //second phase, precommit + b = b_phases[0] //third phase, commit + + if (fsi.last_vote_block_ref != sha256.empty()){ + if (p.timestamp > fork_db.get_block_by_id(fsi.last_vote_block_ref).timestamp){ + monotony_check = true; + } + } + else monotony_check = true; //if I have never voted on a proposal, means the protocol feature just activated and we can proceed + + if (fsi.locked_block_ref != sha256.empty()){ + //Safety check : check if this proposal extends the proposal we're locked on + if (extends(p, fork_db.get_block_by_id(fsi.locked_block_ref)) safety_check = true; + //Liveness check : check if the height of this proposal's justification is higher than the height of the proposal I'm locked on. 
This allows restoration of liveness if a replica is locked on a stale proposal + if (fork_db.get_block_by_height(p.id(), p.last_qc_block_height).timestamp > fork_db.get_block_by_id(fsi.locked_block_ref).timestamp)) liveness_check = true; + } + else { + //if we're not locked on anything, means the protocol feature just activated and we can proceed + liveness_check = true; + safety_check = true; + } + + if (monotony_check && (liveness_check || safety_check)){ + + uint32_t requested_vote_range_lower_bound = fork_db.get_block_by_height(p.block_id, p.last_qc_block_height).timestamp; + uint32_t requested_vote_range_upper_bound = p.timestamp; + + bool time_range_interference = fsi.last_vote_range_lower_bound < requested_vote_range_upper_bound && requested_vote_range_lower_bound < fsi.last_vote_range_upper_bound; + + //my last vote was on (t9, t10_1], I'm asked to vote on t10 : t9 < t10 && t9 < t10_1; //time_range_interference == true, correct + //my last vote was on (t9, t10_1], I'm asked to vote on t11 : t9 < t11 && t10 < t10_1; //time_range_interference == false, correct + //my last vote was on (t7, t9], I'm asked to vote on t10 : t7 < t10 && t9 < t9; //time_range_interference == false, correct + + bool enough_for_strong_vote = false; + + if (!time_range_interference || extends(p, fork_db.get_block_by_id(fsi.last_vote_block_ref)) enough_for_strong_vote = true; + + //fsi.is_last_vote_strong = enough_for_strong_vote; + fsi.last_vote_block_ref = p.block_id; //v_height + + if (b1.timestamp > fork_db.get_block_by_id(fsi.locked_block_ref).timestamp) fsi.locked_block_ref = b1.block_id; //commit phase on b1 + + fsi.last_vote_range_lower_bound = requested_vote_range_lower_bound; + fsi.last_vote_range_upper_bound = requested_vote_range_upper_bound; + + if (enough_for_strong_vote) return VoteDecision::StrongVote; + else return VoteDecision::WeakVote; + + } + else return VoteDecision::NoVote; +} + +//handlers + +void on_signed_block_received(signed_block sb){ + [...] //verify if block can be linked to our fork database, throw exception if unable to or if duplicate + block_handle previous = fork_db.get_block_by_id(sb.previous); + hs_proposal p = extract_proposal(sb, previous); + on_proposal_received(p, previous); +} + +void on_proposal_received(signed_block_ptr new_block, block_handle& parent){ + + //relevant to all nodes + if (new_block.last_qc_block_height > parent.bhs.last_qc_block_height) { + block_handle found = fork_db.get_block_by_height(new_block.block_id, new_block.last_qc_block_height); + //verify qc is present and if the qc is valid with respect to the found block, throw exception otherwise + + found->valid_qc = new_block.block_extension.qc; + } + + [...] //abstracted, relay proposal to other nodes + + assembled_block_input data = [...] //construct from new_block; + + block_header_state new_block_header_state = parent.bhs.next(data); //f1 & f2 + + block_handle new_block_handle = add_to_fork_db(parent, new_block_header_state); + + bls_public_key[] my_finalizers = [...] 
//abstracted, must return the public keys of my finalizers that are also active in the current finalizer policy + //only relevant if I have at least one finalizer + if (my_finalizers.size()>0) { + for (auto f : my_finalizers){ + finalizer_safety_information& fsi = get_finalizer_info(f); + vote_decision vd = decide_vote(fsi, new_block_handle); //changes fsi unless NoVote + if (vd == VoteDecision::StrongVote || vd == VoteDecision::WeakVote){ + save_finalizer_info(f, fsi); //save finalizer info to prevent double-voting + hs_vote_message msg = [...] //create + broadcast vote message + } + } + } +} + +//when a node receives a vote on a proposal +void on_vote_received(hs_vote_message v){ + + //[...] check for duplicate or invalid vote, return in either case + + block_handle& bc = fork_db.get_block_by_id(v.block_id); + + [...] //abstracted, relay vote to other nodes + + am_i_leader = [...] //abstracted, must return true if I am the leader, false otherwise + + if(!am_i_leader) return; + + //only leader need to take further action on votes + update_pending_qc(v, bc); //update qc for this proposal + +} + +hs_proposal[] get_qc_chain(hs_proposal p){ + b[]; + b[2] = fork_db.get_block_by_height(p.block_id, p.last_qc_block_height); //first phase, prepare + b[1] = fork_db.get_block_by_height(p.block_id, b[2].last_qc_block_height); //second phase, precommit + b[0] = fork_db.get_block_by_height(p.block_id, b[1].last_qc_block_height); //third phase, commit + return b; +} + +//main algorithm entry point. This replaces on_beat() / create_proposal(), and it is now unified with existing systems +{ + block_handle head = fork_db.get_head_block(); + + [...] //if a new finalizer or proposer policy is needed, add it as new_finalizer_policy, new_proposer_policy + + [...] //abstracted, create block header + + + auto found = fork_db.get_block_with_latest_qc(head); + if (head.bhs.is_needed(found.get_best_qc()) { + //insert block extension if a new qc was created + block_extensions.push(construct_hotstuff_block_extension(found.get_best_qc())); + } + header_extensions.push(construct_hotstuff_header_extension(found.get_best_qc(), new_finalizer_policy, new_proposer_policy)); + [...] //abstracted, complete block + + + [...] //abstracted, sign block header + [...] //broadcast signed_block. 
The signed_block is processed by the on_signed_block_received handler by other nodes on the network +} + + diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 9bdb22480b..d11e014138 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -637,7 +637,7 @@ namespace impl { out(name, std::move(mvo)); } - static void add_block_header_instant_finality_extension( mutable_variant_object& mvo, const flat_multimap& header_exts ); + static void add_block_header_instant_finality_extension( mutable_variant_object& mvo, const header_extension_multimap& header_exts ); /** * overload of to_variant_object for signed_block diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index 5e6eb4393b..e0ef2087ec 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -24,6 +24,7 @@ namespace eosio { namespace chain { >; using block_header_extension = block_header_extension_types::block_header_extension_t; + using header_extension_multimap = flat_multimap; // totem for block_header.confirmed that indicates hotstuff consensus is active constexpr uint16_t hs_block_confirmed = std::numeric_limits::max(); @@ -76,7 +77,7 @@ namespace eosio { namespace chain { uint32_t block_num() const { return num_from_id(previous) + 1; } static uint32_t num_from_id(const block_id_type& id); - flat_multimap validate_and_extract_header_extensions()const; + header_extension_multimap validate_and_extract_header_extensions()const; std::optional extract_header_extension(uint16_t extension_id)const; }; diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp new file mode 100644 index 0000000000..03d848c857 --- /dev/null +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -0,0 +1,94 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +namespace eosio::chain { + +namespace detail { struct schedule_info; }; + +using proposer_policy_ptr = std::shared_ptr; + +struct building_block_input { + block_id_type parent_id; + block_timestamp_type timestamp; + account_name producer; + vector new_protocol_feature_activations; +}; + +// this struct can be extracted from a building block +struct block_header_state_input : public building_block_input { + digest_type transaction_mroot; // Comes from std::get(building_block::trx_mroot_or_receipt_digests) + digest_type action_mroot; // Compute root from building_block::action_receipt_digests + std::optional new_proposer_policy; // Comes from building_block::new_proposer_policy + std::optional new_finalizer_policy; // Comes from building_block::new_finalizer_policy + std::optional qc; // Comes from traversing branch from parent and calling get_best_qc() + // assert(qc->block_num <= num_from_id(previous)); + // ... ? +}; + +struct block_header_state_core { + uint32_t last_final_block_height = 0; // last irreversible (final) block. + std::optional final_on_strong_qc_block_height; // will become final if this header achives a strong QC. 
+ std::optional last_qc_block_height; // + uint32_t finalizer_policy_generation; // + + block_header_state_core next(uint32_t last_qc_block_height, bool is_last_qc_strong) const; +}; + +struct block_header_state { + // ------ data members ------------------------------------------------------------ + block_id_type id; + block_header header; + protocol_feature_activation_set_ptr activated_protocol_features; + + block_header_state_core core; + incremental_merkle_tree proposal_mtree; + incremental_merkle_tree finality_mtree; + + finalizer_policy_ptr finalizer_policy; // finalizer set + threshold + generation, supports `digest()` + proposer_policy_ptr proposer_policy; // producer authority schedule, supports `digest()` + + flat_map proposer_policies; + flat_map finalizer_policies; + + // ------ functions ----------------------------------------------------------------- + digest_type compute_finalizer_digest() const; + block_timestamp_type timestamp() const { return header.timestamp; } + account_name producer() const { return header.producer; } + const block_id_type& previous() const { return header.previous; } + uint32_t block_num() const { return block_header::num_from_id(previous()) + 1; } + const producer_authority_schedule& active_schedule_auth() const { return proposer_policy->proposer_schedule; } + const producer_authority_schedule& pending_schedule_auth() const { return proposer_policies.rbegin()->second->proposer_schedule; } // [greg todo] + + block_header_state next(const block_header_state_input& data) const; + + // block descending from this need the provided qc in the block extension + bool is_needed(const quorum_certificate& qc) const { + return !core.last_qc_block_height || qc.block_height > *core.last_qc_block_height; + } + + protocol_feature_activation_set_ptr get_prev_activated_protocol_features() const { return {}; } // [greg todo] + flat_set get_activated_protocol_features() const { return activated_protocol_features->protocol_features; } + detail::schedule_info prev_pending_schedule() const; + uint32_t active_schedule_version() const; + std::optional& new_pending_producer_schedule() { static std::optional x; return x; } // [greg todo] + signed_block_header make_block_header(const checksum256_type& transaction_mroot, + const checksum256_type& action_mroot, + const std::optional& new_producers, + vector&& new_protocol_feature_activations, + const protocol_feature_set& pfs) const; + uint32_t increment_finalizer_policy_generation() { return ++core.finalizer_policy_generation; } +}; + +using block_header_state_ptr = std::shared_ptr; + +} + +// [greg todo] which members need to be serialized to disk when saving fork_db +// obviously many are missing below. 
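// A fuller reflection would plausibly need something like the sketch below (unverified, left as a
// comment until the on-disk member set is settled):
//
//   FC_REFLECT( eosio::chain::block_header_state_core,
//               (last_final_block_height)(final_on_strong_qc_block_height)
//               (last_qc_block_height)(finalizer_policy_generation) )
//   FC_REFLECT( eosio::chain::block_header_state,
//               (id)(header)(activated_protocol_features)(core)
//               (finalizer_policy)(proposer_policy)(proposer_policies)(finalizer_policies) )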
+FC_REFLECT( eosio::chain::block_header_state, (id)) diff --git a/libraries/chain/include/eosio/chain/block_header_state_legacy.hpp b/libraries/chain/include/eosio/chain/block_header_state_legacy.hpp index ae62111275..9ab9d6a1ad 100644 --- a/libraries/chain/include/eosio/chain/block_header_state_legacy.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state_legacy.hpp @@ -80,10 +80,11 @@ namespace detail { builtin_protocol_feature_t feature_codename ); } +using validator_t = const std::function&, const vector&)>; + struct pending_block_header_state_legacy : public detail::block_header_state_legacy_common { protocol_feature_activation_set_ptr prev_activated_protocol_features; detail::schedule_info prev_pending_schedule; - std::optional proposed_finalizer_policy; // set by set_finalizer host function bool was_pending_promoted = false; block_id_type previous; account_name producer; @@ -100,51 +101,20 @@ struct pending_block_header_state_legacy : public detail::block_header_state_leg block_header_state_legacy finish_next( const signed_block_header& h, vector&& additional_signatures, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator, + validator_t& validator, bool skip_validate_signee = false )&&; block_header_state_legacy finish_next( signed_block_header& h, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator, + validator_t& validator, const signer_callback_type& signer )&&; protected: block_header_state_legacy _finish_next( const signed_block_header& h, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator )&&; + validator_t& validator )&&; }; -/** - * @struct block_header_state_core - * - * A data structure holding hotstuff core information - */ -struct block_header_state_core { - // the block height of the last irreversible (final) block. - uint32_t last_final_block_height = 0; - - // the block height of the block that would become irreversible (final) if the - // associated block header was to achieve a strong QC. 
- std::optional final_on_strong_qc_block_height; - - // the block height of the block that is referenced as the last QC block - std::optional last_qc_block_height; - - block_header_state_core() = default; - - explicit block_header_state_core( uint32_t last_final_block_height, - std::optional final_on_strong_qc_block_height, - std::optional last_qc_block_height ); - - block_header_state_core next( uint32_t last_qc_block_height, - bool is_last_qc_strong); -}; /** * @struct block_header_state * @@ -186,7 +156,7 @@ struct block_header_state_legacy : public detail::block_header_state_legacy_comm /// this data is redundant with the data stored in header, but it acts as a cache that avoids /// duplication of work - flat_multimap header_exts; + header_extension_multimap header_exts; block_header_state_legacy() = default; @@ -196,15 +166,13 @@ struct block_header_state_legacy : public detail::block_header_state_legacy_comm explicit block_header_state_legacy( legacy::snapshot_block_header_state_v2&& snapshot ); - pending_block_header_state_legacy next( block_timestamp_type when, bool hotstuff_activated, uint16_t num_prev_blocks_to_confirm )const; + pending_block_header_state_legacy next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; block_header_state_legacy next( const signed_block_header& h, vector&& additional_signatures, const protocol_feature_set& pfs, bool hotstuff_activated, - const std::function&, - const vector& )>& validator, + validator_t& validator, bool skip_validate_signee = false )const; uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp new file mode 100644 index 0000000000..6c98e1f2f1 --- /dev/null +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include +#include + +namespace eosio::chain { + + struct block_state : public block_header_state { // block_header_state provides parent link + // ------ data members ------------------------------------------------------------- + signed_block_ptr block; + bool validated; // We have executed the block's trxs and verified that action merkle root (block id) matches. 
+ digest_type finalizer_digest; + pending_quorum_certificate pending_qc; // where we accumulate votes we receive + std::optional valid_qc; // qc received from the network + + + // ------ data members caching information available elsewhere ---------------------- + block_id_type cached_id; // cache of block_header_state::header.calculate_id() (indexed on this field) + header_extension_multimap header_exts; // redundant with the data stored in header + + // ------ functions ----------------------------------------------------------------- + const block_id_type& id() const { return cached_id; } + const block_id_type& previous() const { return block_header_state::previous(); } + uint32_t block_num() const { return block_header_state::block_num(); } + block_timestamp_type timestamp() const { return block_header_state::timestamp(); } + const extensions_type& header_extensions() const { return block_header_state::header.header_extensions; } + bool is_valid() const { return validated; } + void set_valid(bool b) { validated = b; } + uint32_t irreversible_blocknum() const { return 0; } // [greg todo] equivalent of dpos_irreversible_blocknum + + protocol_feature_activation_set_ptr get_activated_protocol_features() const { return block_header_state::activated_protocol_features; } + deque extract_trxs_metas() { return {}; }; // [greg todo] see impl in block_state_legacy.hpp + }; + +using block_state_ptr = std::shared_ptr; + +} // namespace eosio::chain + +// [greg todo] which members need to be serialized to disk when saving fork_db +FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated) ) diff --git a/libraries/chain/include/eosio/chain/block_state_legacy.hpp b/libraries/chain/include/eosio/chain/block_state_legacy.hpp index ffc5a8699d..b8c1876f21 100644 --- a/libraries/chain/include/eosio/chain/block_state_legacy.hpp +++ b/libraries/chain/include/eosio/chain/block_state_legacy.hpp @@ -12,9 +12,7 @@ namespace eosio { namespace chain { signed_block_ptr b, const protocol_feature_set& pfs, bool hotstuff_activated, - const std::function&, - const vector& )>& validator, + const validator_t& validator, bool skip_validate_signee ); @@ -22,29 +20,39 @@ namespace eosio { namespace chain { signed_block_ptr&& b, // unsigned block deque&& trx_metas, const protocol_feature_set& pfs, - const std::function&, - const vector& )>& validator, + const validator_t& validator, const signer_callback_type& signer ); block_state_legacy() = default; - signed_block_ptr block; + signed_block_ptr block; + // internal use only, not thread safe + const block_id_type& id() const { return block_header_state_legacy::id; } + const block_id_type& previous() const { return block_header_state_legacy::prev(); } + uint32_t irreversible_blocknum() const { return dpos_irreversible_blocknum; } + uint32_t block_num() const { return block_header_state_legacy::block_num; } + block_timestamp_type timestamp() const { return header.timestamp; } + account_name producer() const { return header.producer; } + const extensions_type& header_extensions() const { return header.header_extensions; } + bool is_valid() const { return validated; } + void set_valid(bool b) { validated = b; } + + protocol_feature_activation_set_ptr get_activated_protocol_features() const { return activated_protocol_features; } + const producer_authority_schedule& active_schedule_auth() const { return block_header_state_legacy_common::active_schedule; } + const producer_authority_schedule& pending_schedule_auth() const { return 
block_header_state_legacy::pending_schedule.schedule; } + const deque& trxs_metas() const { return _cached_trxs; } + + private: // internal use only, not thread safe friend struct fc::reflector; - friend bool block_state_is_valid( const block_state_legacy& ); // work-around for multi-index access friend struct controller_impl; - friend class fork_database; - friend struct fork_database_impl; - friend class unapplied_transaction_queue; - friend struct pending_state; + friend struct completed_block; - bool is_valid()const { return validated; } bool is_pub_keys_recovered()const { return _pub_keys_recovered; } - + deque extract_trxs_metas() { _pub_keys_recovered = false; auto result = std::move( _cached_trxs ); @@ -55,7 +63,6 @@ namespace eosio { namespace chain { _pub_keys_recovered = keys_recovered; _cached_trxs = std::move( trxs_metas ); } - const deque& trxs_metas()const { return _cached_trxs; } bool validated = false; @@ -66,7 +73,6 @@ namespace eosio { namespace chain { }; using block_state_legacy_ptr = std::shared_ptr; - using branch_type = deque; } } /// namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 41bb4ae40f..03de4f8e44 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -1,9 +1,11 @@ #pragma once #include +#include #include #include #include #include +#include #include #include @@ -48,14 +50,16 @@ namespace eosio::chain { class subjective_billing; using resource_limits::resource_limits_manager; using apply_handler = std::function; + + using fork_database_legacy = fork_database; + using branch_type = typename fork_database_legacy::branch_type; + using forked_branch_callback = std::function; // lookup transaction_metadata via supplied function to avoid re-creation using trx_meta_cache_lookup = std::function; using block_signal_params = std::tuple; - class fork_database; - enum class db_read_mode { HEAD, IRREVERSIBLE, @@ -171,7 +175,7 @@ namespace eosio::chain { fc::microseconds total_time{}; }; - block_state_legacy_ptr finalize_block( block_report& br, const signer_callback_type& signer_callback ); + void finalize_block( block_report& br, const signer_callback_type& signer_callback ); void sign_block( const signer_callback_type& signer_callback ); void commit_block(); @@ -195,7 +199,7 @@ namespace eosio::chain { const chainbase::database& db()const; - const fork_database& fork_db()const; + const fork_database_legacy& fork_db()const; const account_object& get_account( account_name n )const; const global_property_object& get_global_properties()const; @@ -226,18 +230,21 @@ namespace eosio::chain { uint32_t head_block_num()const; time_point head_block_time()const; + block_timestamp_type head_block_timestamp()const; block_id_type head_block_id()const; account_name head_block_producer()const; const block_header& head_block_header()const; - block_state_legacy_ptr head_block_state()const; + const signed_block_ptr& head_block()const; + // returns nullptr after instant finality enabled + block_state_legacy_ptr head_block_state_legacy()const; uint32_t fork_db_head_block_num()const; - block_id_type fork_db_head_block_id()const; + const block_id_type& fork_db_head_block_id()const; time_point pending_block_time()const; block_timestamp_type pending_block_timestamp()const; account_name pending_block_producer()const; - const block_signing_authority& pending_block_signing_authority()const; + block_signing_authority 
pending_block_signing_authority()const; std::optional pending_producer_block_id()const; uint32_t pending_block_num()const; diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 2367631096..b2967d9f67 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -1,11 +1,13 @@ #pragma once #include +#include #include -namespace eosio { namespace chain { +namespace eosio::chain { using boost::signals2::signal; + template struct fork_database_impl; /** @@ -19,82 +21,85 @@ namespace eosio { namespace chain { * * An internal mutex is used to provide thread-safety. */ + template // either [block_state_legacy_ptr, block_state_ptr], same with block_header_state_ptr class fork_database { - public: - - explicit fork_database( const std::filesystem::path& data_dir ); - ~fork_database(); - - void open( const std::function&, - const vector& )>& validator ); - void close(); - - block_header_state_legacy_ptr get_block_header( const block_id_type& id )const; - block_state_legacy_ptr get_block( const block_id_type& id )const; - - /** - * Purges any existing blocks from the fork database and resets the root block_header_state to the provided value. - * The head will also be reset to point to the root. - */ - void reset( const block_header_state_legacy& root_bhs ); - - /** - * Removes validated flag from all blocks in fork database and resets head to point to the root. - */ - void rollback_head_to_root(); - - /** - * Advance root block forward to some other block in the tree. - */ - void advance_root( const block_id_type& id ); - - /** - * Add block state to fork database. - * Must link to existing block in fork database or the root. - */ - void add( const block_state_legacy_ptr& next_block, bool ignore_duplicate = false ); - - void remove( const block_id_type& id ); - - block_state_legacy_ptr root()const; - block_state_legacy_ptr head()const; - block_state_legacy_ptr pending_head()const; - - /** - * Returns the sequence of block states resulting from trimming the branch from the - * root block (exclusive) to the block with an id of `h` (inclusive) by removing any - * block states corresponding to block numbers greater than `trim_after_block_num`. - * - * The order of the sequence is in descending block number order. - * A block with an id of `h` must exist in the fork database otherwise this method will throw an exception. - */ - branch_type fetch_branch( const block_id_type& h, uint32_t trim_after_block_num = std::numeric_limits::max() )const; - - - /** - * Returns the block state with a block number of `block_num` that is on the branch that - * contains a block with an id of`h`, or the empty shared pointer if no such block can be found. 
- */ - block_state_legacy_ptr search_on_branch( const block_id_type& h, uint32_t block_num )const; - - /** - * Given two head blocks, return two branches of the fork graph that - * end with a common ancestor (same prior block) - */ - pair< branch_type, branch_type > fetch_branch_from( const block_id_type& first, - const block_id_type& second )const; - - - void mark_valid( const block_state_legacy_ptr& h ); - - static const uint32_t magic_number; - - static const uint32_t min_supported_version; - static const uint32_t max_supported_version; - - private: - unique_ptr my; + public: + using bs = bsp::element_type; + using bhs = bhsp::element_type; + using branch_type = deque; + using branch_type_pair = pair; + + explicit fork_database( const std::filesystem::path& data_dir ); + ~fork_database(); + + void open( validator_t& validator ); + void close(); + + bhsp get_block_header( const block_id_type& id ) const; + bsp get_block( const block_id_type& id ) const; + + /** + * Purges any existing blocks from the fork database and resets the root block_header_state to the provided value. + * The head will also be reset to point to the root. + */ + void reset( const bhs& root_bhs ); + + /** + * Removes validated flag from all blocks in fork database and resets head to point to the root. + */ + void rollback_head_to_root(); + + /** + * Advance root block forward to some other block in the tree. + */ + void advance_root( const block_id_type& id ); + + /** + * Add block state to fork database. + * Must link to existing block in fork database or the root. + */ + void add( const bsp& next_block, bool ignore_duplicate = false ); + + void remove( const block_id_type& id ); + + bsp root() const; + bsp head() const; + bsp pending_head() const; + + /** + * Returns the sequence of block states resulting from trimming the branch from the + * root block (exclusive) to the block with an id of `h` (inclusive) by removing any + * block states corresponding to block numbers greater than `trim_after_block_num`. + * + * The order of the sequence is in descending block number order. + * A block with an id of `h` must exist in the fork database otherwise this method will throw an exception. + */ + branch_type fetch_branch( const block_id_type& h, uint32_t trim_after_block_num = std::numeric_limits::max() ) const; + + + /** + * Returns the block state with a block number of `block_num` that is on the branch that + * contains a block with an id of`h`, or the empty shared pointer if no such block can be found. 
+ */ + bsp search_on_branch( const block_id_type& h, uint32_t block_num ) const; + + /** + * Given two head blocks, return two branches of the fork graph that + * end with a common ancestor (same prior block) + */ + branch_type_pair fetch_branch_from(const block_id_type& first, const block_id_type& second) const; + + void mark_valid( const bsp& h ); + + static const uint32_t magic_number; + + static const uint32_t min_supported_version; + static const uint32_t max_supported_version; + + private: + unique_ptr> my; }; -} } /// eosio::chain + using fork_database_legacy = fork_database; + +} /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp b/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp index b81a3c7308..88bf7eb530 100644 --- a/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp +++ b/libraries/chain/include/eosio/chain/hotstuff/hotstuff.hpp @@ -9,7 +9,7 @@ namespace eosio::chain { - using hs_bitset = boost::dynamic_bitset; + using hs_bitset = boost::dynamic_bitset; using bls_key_map_t = std::map; inline digest_type get_digest_to_sign(const block_id_type& block_id, uint8_t phase_counter, const fc::sha256& final_on_qc) { diff --git a/libraries/chain/include/eosio/chain/producer_schedule.hpp b/libraries/chain/include/eosio/chain/producer_schedule.hpp index af4f513bc8..9a2a5831bd 100644 --- a/libraries/chain/include/eosio/chain/producer_schedule.hpp +++ b/libraries/chain/include/eosio/chain/producer_schedule.hpp @@ -1,6 +1,7 @@ #pragma once #include #include +#include #include #include #include @@ -249,6 +250,12 @@ namespace eosio { namespace chain { uint32_t version = 0; ///< sequentially incrementing version number vector producers; + const producer_authority& get_scheduled_producer( block_timestamp_type t )const { + auto index = t.slot % (producers.size() * config::producer_repetitions); + index /= config::producer_repetitions; + return producers[index]; + } + friend bool operator == ( const producer_authority_schedule& a, const producer_authority_schedule& b ) { if( a.version != b.version ) return false; diff --git a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp index 1f1a49fef5..fc4a5d45b1 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp @@ -7,23 +7,6 @@ namespace eosio { namespace chain { struct protocol_feature_activation : fc::reflect_init { static constexpr uint16_t extension_id() { return 0; } static constexpr bool enforce_unique() { return true; } - - protocol_feature_activation() = default; - - protocol_feature_activation( const vector& pf ) - :protocol_features( pf ) - {} - - protocol_feature_activation( vector&& pf ) - :protocol_features( std::move(pf) ) - {} - - protocol_feature_activation(const protocol_feature_activation&) = default; - protocol_feature_activation(protocol_feature_activation&&) = default; - - protocol_feature_activation& operator=(protocol_feature_activation&&) = default; - protocol_feature_activation& operator=(const protocol_feature_activation&) = default; - void reflector_init(); vector protocol_features; diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 7a69c5fe0e..e2297de848 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -640,8 +640,8 @@ namespace eosio { 
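Aside on the get_scheduled_producer() helper added to producer_authority_schedule above: it maps a block timestamp slot to a producer by giving each producer a run of config::producer_repetitions consecutive slots. A standalone restatement of that arithmetic, purely for illustration (producer_index is a hypothetical helper, and 12 repetitions / 21 producers are assumed example values, not taken from this patch):

```cpp
// Same index arithmetic as get_scheduled_producer(), written out standalone.
// With 21 producers and 12 repetitions the rotation is 21 * 12 = 252 slots:
//   slots   0..11 -> producers[0]
//   slots  12..23 -> producers[1]
//   ...
//   slot  251     -> producers[20], then slot 252 wraps back to producers[0]
uint32_t producer_index(uint32_t slot, uint32_t num_producers, uint32_t repetitions = 12) {
   uint32_t index = slot % (num_producers * repetitions); // position within one rotation
   return index / repetitions;                            // which producer owns this run
}
```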
namespace testing { bool validate() { - auto hbh = control->head_block_state()->header; - auto vn_hbh = validating_node->head_block_state()->header; + const auto& hbh = control->head_block_header(); + const auto& vn_hbh = validating_node->head_block_header(); bool ok = control->head_block_id() == validating_node->head_block_id() && hbh.previous == vn_hbh.previous && hbh.timestamp == vn_hbh.timestamp && diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index ef21548be8..e4abdace2b 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -398,7 +398,6 @@ namespace eosio { namespace testing { signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs, bool no_throw, std::vector& traces ) { - auto head = control->head_block_state(); auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; @@ -438,7 +437,7 @@ namespace eosio { namespace testing { void base_tester::_start_block(fc::time_point block_time) { auto head_block_number = control->head_block_num(); - auto producer = control->head_block_state()->get_scheduled_producer(block_time); + auto producer = control->active_producers().get_scheduled_producer(block_time); auto last_produced_block_num = control->last_irreversible_block_num(); auto itr = last_produced_block.find(producer.producer_name); @@ -473,16 +472,17 @@ namespace eosio { namespace testing { signed_block_ptr base_tester::_finish_block() { FC_ASSERT( control->is_building_block(), "must first start a block before it can be finished" ); - auto producer = control->head_block_state()->get_scheduled_producer( control->pending_block_time() ); + auto auth = control->pending_block_signing_authority(); + auto producer_name = control->pending_block_producer(); vector signing_keys; - auto default_active_key = get_public_key( producer.producer_name, "active"); - producer.for_each_key([&](const public_key_type& key){ + auto default_active_key = get_public_key( producer_name, "active"); + producer_authority::for_each_key(auth, [&](const public_key_type& key){ const auto& iter = block_signing_private_keys.find(key); if(iter != block_signing_private_keys.end()) { signing_keys.push_back(iter->second); } else if (key == default_active_key) { - signing_keys.emplace_back( get_private_key( producer.producer_name, "active") ); + signing_keys.emplace_back( get_private_key( producer_name, "active") ); } }); @@ -497,9 +497,9 @@ namespace eosio { namespace testing { } ); control->commit_block(); - last_produced_block[control->head_block_state()->header.producer] = control->head_block_state()->id; + last_produced_block[producer_name] = control->head_block_id(); - return control->head_block_state()->block; + return control->head_block(); } signed_block_ptr base_tester::produce_block( std::vector& traces ) { @@ -547,7 +547,7 @@ namespace eosio { namespace testing { void base_tester::produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(const fc::microseconds target_elapsed_time) { fc::microseconds elapsed_time; while (elapsed_time < target_elapsed_time) { - for(uint32_t i = 0; i < control->head_block_state()->active_schedule.producers.size(); i++) { + for(uint32_t i = 0; i < control->active_producers().producers.size(); i++) { const auto time_to_skip = fc::milliseconds(config::producer_repetitions * config::block_interval_ms); produce_block(time_to_skip); elapsed_time += time_to_skip; diff --git a/plugins/chain_api_plugin/chain.swagger.yaml b/plugins/chain_api_plugin/chain.swagger.yaml index 
08d831fad0..5bef6ef0d0 100644 --- a/plugins/chain_api_plugin/chain.swagger.yaml +++ b/plugins/chain_api_plugin/chain.swagger.yaml @@ -186,30 +186,6 @@ paths: schema: description: Returns Nothing - /get_block_header_state: - post: - description: Retrieves the glock header state - operationId: get_block_header_state - requestBody: - content: - application/json: - schema: - type: object - required: - - block_num_or_id - properties: - block_num_or_id: - type: string - description: Provide a block_number or a block_id - - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "https://docs.eosnetwork.com/openapi/v2.0/BlockHeaderState.yaml" - /get_abi: post: description: Retrieves the ABI for a contract based on its account name diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 1fc626d9e8..4d5b6c85c8 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -132,7 +132,6 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_activated_protocol_features, 200, http_params_types::possible_no_params), CHAIN_RO_CALL_POST(get_block, fc::variant, 200, http_params_types::params_required), // _POST because get_block() returns a lambda to be executed on the http thread pool CHAIN_RO_CALL(get_block_info, 200, http_params_types::params_required), - CHAIN_RO_CALL(get_block_header_state, 200, http_params_types::params_required), CHAIN_RO_CALL_POST(get_account, chain_apis::read_only::get_account_results, 200, http_params_types::params_required), CHAIN_RO_CALL(get_code, 200, http_params_types::params_required), CHAIN_RO_CALL(get_code_hash, 200, http_params_types::params_required), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index af7719b061..ee3a564499 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1984,9 +1984,9 @@ fc::variant read_only::convert_block( const chain::signed_block_ptr& block, abi_ fc::variant read_only::get_block_info(const read_only::get_block_info_params& params, const fc::time_point&) const { - signed_block_ptr block; + std::optional block; try { - block = db.fetch_block_by_number( params.block_num ); + block = db.fetch_block_header_by_number( params.block_num ); } catch (...) { // assert below will handle the invalid block num } @@ -2011,29 +2011,6 @@ fc::variant read_only::get_block_info(const read_only::get_block_info_params& pa ("ref_block_prefix", ref_block_prefix); } -fc::variant read_only::get_block_header_state(const get_block_header_state_params& params, const fc::time_point&) const { - block_state_legacy_ptr b; - std::optional block_num; - std::exception_ptr e; - try { - block_num = fc::to_uint64(params.block_num_or_id); - } catch( ... 
) {} - - if( block_num ) { - b = db.fetch_block_state_by_number(*block_num); - } else { - try { - b = db.fetch_block_state_by_id(fc::variant(params.block_num_or_id).as()); - } EOS_RETHROW_EXCEPTIONS(chain::block_id_type_exception, "Invalid block ID: ${block_num_or_id}", ("block_num_or_id", params.block_num_or_id)) - } - - EOS_ASSERT( b, unknown_block_exception, "Could not find reversible block: ${block}", ("block", params.block_num_or_id)); - - fc::variant vo; - fc::to_variant( static_cast(*b), vo ); - return vo; -} - void read_write::push_block(read_write::push_block_params&& params, next_function next) { try { app().get_method()(std::make_shared( std::move(params) ), std::optional{}, block_state_legacy_ptr{}); diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 39c48d3bfe..cd1fd5b0aa 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -409,12 +409,6 @@ class read_only : public api_base { fc::variant get_block_info(const get_block_info_params& params, const fc::time_point& deadline) const; - struct get_block_header_state_params { - string block_num_or_id; - }; - - fc::variant get_block_header_state(const get_block_header_state_params& params, const fc::time_point& deadline) const; - struct get_table_rows_params { bool json = false; name code; @@ -1067,7 +1061,6 @@ FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_params, FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_results, (activated_protocol_features)(more) ) FC_REFLECT(eosio::chain_apis::read_only::get_raw_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_info_params, (block_num)) -FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_params, (block_num_or_id)(include_extensions)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_result, (id)(signed_block_header)(block_extensions)) diff --git a/plugins/chain_plugin/test/test_trx_finality_status_processing.cpp b/plugins/chain_plugin/test/test_trx_finality_status_processing.cpp index 770fe31e46..5bff8f075c 100644 --- a/plugins/chain_plugin/test/test_trx_finality_status_processing.cpp +++ b/plugins/chain_plugin/test/test_trx_finality_status_processing.cpp @@ -78,13 +78,13 @@ chain::block_id_type make_block_id( uint32_t block_num ) { return block_id; } -chain::transaction_trace_ptr make_transaction_trace( const packed_transaction_ptr trx, uint32_t block_number, const eosio::chain::block_state_legacy_ptr& bs_ptr, +chain::transaction_trace_ptr make_transaction_trace( const packed_transaction_ptr trx, uint32_t block_number, const eosio::chain::signed_block_ptr& b_ptr, chain::transaction_receipt_header::status_enum status = eosio::chain::transaction_receipt_header::executed ) { return std::make_shared(chain::transaction_trace{ trx->id(), block_number, chain::block_timestamp_type(fc::time_point::now()), - bs_ptr ? bs_ptr->id : std::optional {}, + b_ptr ? 
b_ptr->calculate_id() : std::optional {}, chain::transaction_receipt_header{status}, fc::microseconds(0), 0, @@ -98,7 +98,7 @@ chain::transaction_trace_ptr make_transaction_trace( const packed_transaction_pt }); } -auto make_block_state( uint32_t block_num ) { +auto make_block( uint32_t block_num ) { static uint64_t unique_num = 0; ++unique_num; chain::block_id_type block_id = make_block_id(block_num); @@ -113,43 +113,11 @@ auto make_block_state( uint32_t block_num ) { auto priv_key = get_private_key( block->producer, "active" ); auto pub_key = get_public_key( block->producer, "active" ); - auto prev = std::make_shared(); - auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), prev->blockroot_merkle.get_root())); - auto sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, prev->pending_schedule.schedule_hash )); + auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), block_id_type{})); + auto sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, digest_type{} )); block->producer_signature = priv_key.sign( sig_digest ); - std::vector signing_keys; - signing_keys.emplace_back( priv_key ); - auto signer = [&]( chain::digest_type d ) { - std::vector result; - result.reserve( signing_keys.size()); - for( const auto& k: signing_keys ) - result.emplace_back( k.sign( d )); - return result; - }; - chain::pending_block_header_state_legacy pbhs; - pbhs.producer = block->producer; - pbhs.timestamp = block->timestamp; - pbhs.previous = block->previous; - chain::producer_authority_schedule schedule = - {0, {chain::producer_authority{block->producer, - chain::block_signing_authority_v0{1, {{pub_key, 1}}}}}}; - pbhs.active_schedule = schedule; - pbhs.valid_block_signing_authority = chain::block_signing_authority_v0{1, {{pub_key, 1}}}; - auto bsp = std::make_shared( - std::move( pbhs ), - std::move( block ), - deque(), - chain::protocol_feature_set(), - []( chain::block_timestamp_type timestamp, - const fc::flat_set& cur_features, - const std::vector& new_features ) {}, - signer - ); - bsp->id = block_id; - bsp->block_num = block_num; - - return bsp; + return block; } std::string set_now(const char* date, const char* time) { @@ -172,9 +140,9 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { using trx_deque = eosio::chain::deque< std::tuple< chain::transaction_trace_ptr, packed_transaction_ptr > >; uint32_t bn = 20; - auto add = [&bn, &status](trx_deque& trx_pairs, const eosio::chain::block_state_legacy_ptr& bs_ptr) { + auto add = [&bn, &status](trx_deque& trx_pairs, const eosio::chain::signed_block_ptr& b_ptr) { auto trx = make_unique_trx(fc::seconds(2)); - auto trace = make_transaction_trace( trx, bn, bs_ptr); + auto trace = make_transaction_trace( trx, bn, b_ptr); trx_pairs.push_back(std::tuple(trace, trx)); status.signal_applied_transaction(trace, trx); }; @@ -183,12 +151,12 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { // Create speculative block to begin applying transactions locally status.signal_block_start(bn); - const eosio::chain::block_state_legacy_ptr no_bs; + const eosio::chain::signed_block_ptr no_b; - add(trx_pairs_20, no_bs); - add(trx_pairs_20, no_bs); - add(trx_pairs_20, no_bs); - add(trx_pairs_20, no_bs); + add(trx_pairs_20, no_b); + add(trx_pairs_20, no_b); + add(trx_pairs_20, no_b); + add(trx_pairs_20, no_b); auto cs = status.get_chain_state(); BOOST_CHECK(cs.head_id == eosio::chain::block_id_type{}); @@ -237,62 +205,62 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { 
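Because signed_block, unlike the removed block_state_legacy, carries no cached id, the reworked assertions in this test call calculate_id() on every check. A test could equally compute the id once per block and reuse it; the snippet below is purely illustrative and not part of this patch:

```cpp
// Illustrative pattern only: cache the id once per block instead of
// recomputing it in every assertion.
const auto b_20    = make_block(bn);
const auto b_20_id = b_20->calculate_id();
status.signal_block_start(bn);
status.signal_accepted_block(b_20, b_20_id);
BOOST_CHECK(status.get_chain_state().head_id == b_20_id);
```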
//Make a real block start. Pull these before any updates to the trx/trace objects. // send block 20 - const auto bs_20 = make_block_state(bn); + const auto b_20 = make_block(bn); status.signal_block_start(bn); for (const auto& trx_tuple : trx_pairs_20) { const auto& trace = std::get<0>(trx_tuple); const auto& txn = std::get<1>(trx_tuple); - trace->producer_block_id = bs_20->id; - trace->block_time = bs_20->block->timestamp; + trace->producer_block_id = b_20->calculate_id(); + trace->block_time = b_20->timestamp; status.signal_applied_transaction(trace, txn); } // and 2 new transactions const auto block_20_time = set_now("2022-04-04", "04:44:44.500"); - add(trx_pairs_20, bs_20); - add(trx_pairs_20, bs_20); - status.signal_accepted_block(bs_20->block, bs_20->id); + add(trx_pairs_20, b_20); + add(trx_pairs_20, b_20); + status.signal_accepted_block(b_20, b_20->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_20->id); + BOOST_CHECK(cs.head_id == b_20->calculate_id()); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_20[0])->producer_block_id); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_20[1])->producer_block_id); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_20[2])->producer_block_id); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_20[3])->producer_block_id); - BOOST_CHECK(cs.head_block_timestamp == bs_20->block->timestamp); + BOOST_CHECK(cs.head_block_timestamp == b_20->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_20->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_20->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK(fc::time_point_sec(ts->expiration) == (std::get<1>(trx_pairs_20[1])->expiration())); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -316,45 +284,45 @@ 
BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { const auto block_21_time = set_now("2022-04-04", "04:44:45.000"); trx_deque trx_pairs_21; bn = 21; - const auto bs_21 = make_block_state(bn); + const auto b_21 = make_block(bn); status.signal_block_start(bn); fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - add(trx_pairs_21, bs_21); - status.signal_accepted_block(bs_21->block, bs_21->id); + add(trx_pairs_21, b_21); + status.signal_accepted_block(b_21, b_21->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_21->id); + BOOST_CHECK(cs.head_id == b_21->calculate_id()); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_21[0])->producer_block_id); - BOOST_CHECK(cs.head_block_timestamp == bs_21->block->timestamp); + BOOST_CHECK(cs.head_block_timestamp == b_21->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_20->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_20->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -374,8 +342,8 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { ts = status.get_trx_state(std::get<1>(trx_pairs_21[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_21->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_21->block->timestamp); + BOOST_CHECK(ts->block_id == b_21->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_21->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_21_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -386,45 +354,45 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { trx_deque trx_pairs_22; bn = 22; - const auto bs_22 = make_block_state(bn); + const auto b_22 = make_block(bn); status.signal_block_start(bn); - 
add(trx_pairs_22, bs_22); - status.signal_accepted_block(bs_22->block, bs_22->id); + add(trx_pairs_22, b_22); + status.signal_accepted_block(b_22, b_22->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_22->id); + BOOST_CHECK(cs.head_id == b_22->calculate_id()); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_22[0])->producer_block_id); - BOOST_CHECK(cs.head_block_timestamp == bs_22->block->timestamp); + BOOST_CHECK(cs.head_block_timestamp == b_22->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_20->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_20->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -444,65 +412,62 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { ts = status.get_trx_state(std::get<1>(trx_pairs_21[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_21->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_21->block->timestamp); + BOOST_CHECK(ts->block_id == b_21->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_21->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_21_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_22[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22->block->timestamp); + BOOST_CHECK(ts->block_id == b_22->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); - - - // send block 22 const auto block_22_alt_time = set_now("2022-04-04", "04:44:46.000"); trx_deque 
trx_pairs_22_alt; bn = 22; - const auto bs_22_alt = make_block_state(bn); + const auto b_22_alt = make_block(bn); status.signal_block_start(bn); - add(trx_pairs_22_alt, bs_22_alt); - status.signal_accepted_block(bs_22_alt->block, bs_22_alt->id); + add(trx_pairs_22_alt, b_22_alt); + status.signal_accepted_block(b_22_alt, b_22_alt->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_22_alt->id); + BOOST_CHECK(cs.head_id == b_22_alt->calculate_id()); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_22_alt[0])->producer_block_id); - BOOST_CHECK(cs.head_block_timestamp == bs_22_alt->block->timestamp); + BOOST_CHECK(cs.head_block_timestamp == b_22_alt->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_20->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_20->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -522,22 +487,22 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { ts = status.get_trx_state(std::get<1>(trx_pairs_21[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_21->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_21->block->timestamp); + BOOST_CHECK(ts->block_id == b_21->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_21->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_21_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_22[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22->block->timestamp); + BOOST_CHECK(ts->block_id == b_22->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22->timestamp); 
BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); ts = status.get_trx_state(std::get<1>(trx_pairs_22_alt[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_22_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_alt_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -549,45 +514,45 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { trx_deque trx_pairs_19; bn = 19; - const auto bs_19 = make_block_state(bn); + const auto b_19 = make_block(bn); status.signal_block_start(bn); - add(trx_pairs_19, bs_19); - status.signal_accepted_block(bs_19->block, bs_19->id); + add(trx_pairs_19, b_19); + status.signal_accepted_block(b_19, b_19->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_19->id); + BOOST_CHECK(cs.head_id == b_19->calculate_id()); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_19[0])->producer_block_id); - BOOST_CHECK(cs.head_block_timestamp == bs_19->block->timestamp); + BOOST_CHECK(cs.head_block_timestamp == b_19->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_19->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_19->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "FAILED"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "FAILED"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "FAILED"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_20->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_20->block->timestamp); + BOOST_CHECK(ts->block_id == b_20->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_20->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "FAILED"); @@ -607,30 +572,30 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { ts = status.get_trx_state(std::get<1>(trx_pairs_21[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_21->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_21->block->timestamp); + 
BOOST_CHECK(ts->block_id == b_21->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_21->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_21_time); BOOST_CHECK_EQUAL(ts->status, "FAILED"); fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); ts = status.get_trx_state(std::get<1>(trx_pairs_22[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22->block->timestamp); + BOOST_CHECK(ts->block_id == b_22->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_time); BOOST_CHECK_EQUAL(ts->status, "FAILED"); ts = status.get_trx_state(std::get<1>(trx_pairs_22_alt[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_22_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_alt_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); ts = status.get_trx_state(std::get<1>(trx_pairs_19[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19->block->timestamp); + BOOST_CHECK(ts->block_id == b_19->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_19_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -647,62 +612,62 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { trx_pairs_19_alt.push_back(trx_pairs_20[3]); trx_pairs_19_alt.push_back(hold_pairs[0]); - const auto bs_19_alt = make_block_state(bn); - // const auto bs_19_alt = make_block_state(make_block_id(bn), std::vector{}); + const auto b_19_alt = make_block(bn); + // const auto b_19_alt = make_block(make_block_id(bn), std::vector{}); status.signal_block_start(bn); for (const auto& trx_tuple : trx_pairs_19_alt) { const auto& trace = std::get<0>(trx_tuple); const auto& txn = std::get<1>(trx_tuple); - trace->producer_block_id = bs_19_alt->id; - trace->block_time = bs_19_alt->block->timestamp; + trace->producer_block_id = b_19_alt->calculate_id(); + trace->block_time = b_19_alt->timestamp; status.signal_applied_transaction(trace, txn); } - status.signal_accepted_block(bs_19_alt->block, bs_19_alt->id); + status.signal_accepted_block(b_19_alt, b_19_alt->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_19_alt->id); + BOOST_CHECK(cs.head_id == b_19_alt->calculate_id()); BOOST_CHECK(cs.head_id == *std::get<0>(trx_pairs_19[0])->producer_block_id); - BOOST_CHECK(cs.head_block_timestamp == bs_19_alt->block->timestamp); + BOOST_CHECK(cs.head_block_timestamp == b_19_alt->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_19_alt->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_19_alt->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), 
pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); ts = status.get_trx_state(std::get<1>(hold_pairs[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -715,30 +680,30 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { ts = status.get_trx_state(std::get<1>(trx_pairs_21[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_21->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_21->block->timestamp); + BOOST_CHECK(ts->block_id == b_21->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_21->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_21_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); ts = status.get_trx_state(std::get<1>(trx_pairs_22[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22->block->timestamp); + BOOST_CHECK(ts->block_id == b_22->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); ts = status.get_trx_state(std::get<1>(trx_pairs_22_alt[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_22_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_alt_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); ts = status.get_trx_state(std::get<1>(trx_pairs_19[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == 
bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_19_time); BOOST_CHECK_EQUAL(ts->status, "IN_BLOCK"); @@ -750,47 +715,47 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { BOOST_REQUIRE(!ts); // irreversible - status.signal_irreversible_block(bs_19_alt->block, bs_19_alt->id); + status.signal_irreversible_block(b_19_alt, b_19_alt->calculate_id()); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == bs_19_alt->id); - BOOST_CHECK(cs.irr_id == bs_19_alt->id); - BOOST_CHECK(cs.irr_block_timestamp == bs_19_alt->block->timestamp); - BOOST_CHECK(cs.earliest_tracked_block_id == bs_19_alt->id); + BOOST_CHECK(cs.head_id == b_19_alt->calculate_id()); + BOOST_CHECK(cs.irr_id == b_19_alt->calculate_id()); + BOOST_CHECK(cs.irr_block_timestamp == b_19_alt->timestamp); + BOOST_CHECK(cs.earliest_tracked_block_id == b_19_alt->calculate_id()); ts = status.get_trx_state(std::get<1>(trx_pairs_20[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IRREVERSIBLE"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[1])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IRREVERSIBLE"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[2])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IRREVERSIBLE"); ts = status.get_trx_state(std::get<1>(trx_pairs_20[3])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_20_time); BOOST_CHECK_EQUAL(ts->status, "IRREVERSIBLE"); ts = status.get_trx_state(std::get<1>(hold_pairs[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), pre_block_20_time); BOOST_CHECK_EQUAL(ts->status, "IRREVERSIBLE"); @@ -803,30 +768,30 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { ts = 
status.get_trx_state(std::get<1>(trx_pairs_21[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_21->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_21->block->timestamp); + BOOST_CHECK(ts->block_id == b_21->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_21->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_21_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); ts = status.get_trx_state(std::get<1>(trx_pairs_22[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22->block->timestamp); + BOOST_CHECK(ts->block_id == b_22->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); ts = status.get_trx_state(std::get<1>(trx_pairs_22_alt[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_22_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_22_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_22_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_22_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_22_alt_time); BOOST_CHECK_EQUAL(ts->status, "FORKED_OUT"); ts = status.get_trx_state(std::get<1>(trx_pairs_19[0])->id()); BOOST_REQUIRE(ts); - BOOST_CHECK(ts->block_id == bs_19_alt->id); - BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == bs_19_alt->block->timestamp); + BOOST_CHECK(ts->block_id == b_19_alt->calculate_id()); + BOOST_CHECK(block_timestamp_type(ts->block_timestamp) == b_19_alt->timestamp); BOOST_CHECK_EQUAL(ts->received.to_iso_string(), block_19_time); BOOST_CHECK_EQUAL(ts->status, "IRREVERSIBLE"); @@ -834,7 +799,7 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_logic) { try { namespace { using trx_deque = eosio::chain::deque< std::tuple< chain::transaction_trace_ptr, packed_transaction_ptr > >; - const eosio::chain::block_state_legacy_ptr no_bs; + const eosio::chain::signed_block_ptr no_b; struct block_frame { static uint32_t last_used_block_num; @@ -844,7 +809,7 @@ namespace { const std::string time; trx_deque pre_block; trx_deque block; - chain::block_state_legacy_ptr bs; + chain::signed_block_ptr b; std::string context; block_frame(trx_finality_status_processing& finality_status, const char* block_time, uint32_t block_num = 0) @@ -854,14 +819,14 @@ namespace { block_frame::last_used_block_num = bn; for (uint32_t i = 0; i < block_frame::num; ++i) { auto trx = make_unique_trx(fc::seconds(30)); - auto trace = make_transaction_trace( trx, bn, no_bs); + auto trace = make_transaction_trace( trx, bn, no_b); pre_block.push_back(std::tuple(trace, trx)); status.signal_applied_transaction(trace, trx); } - bs = make_block_state(bn); + b = make_block(bn); for (uint32_t i = 0; i < block_frame::num; ++i) { auto trx = make_unique_trx(fc::seconds(30)); - auto trace = make_transaction_trace( trx, bn, bs); + auto trace = make_transaction_trace( trx, bn, b); block.push_back(std::tuple(trace, trx)); status.signal_applied_transaction(trace, trx); } @@ -869,7 +834,7 @@ namespace { void verify_block(uint32_t begin = 0, uint32_t end = std::numeric_limits::max()) { context = "verify_block"; - verify(block, bs, begin, end); + verify(block, b, begin, end); } void verify_block_not_there(uint32_t begin = 0, uint32_t end = std::numeric_limits::max()) { 
@@ -879,7 +844,7 @@ namespace { void verify_spec_block(uint32_t begin = 0, uint32_t end = std::numeric_limits::max()) { context = "verify_spec_block"; - verify(pre_block, no_bs, begin, end); + verify(pre_block, no_b, begin, end); } void verify_spec_block_not_there(uint32_t begin = 0, uint32_t end = std::numeric_limits::max()) { @@ -898,7 +863,7 @@ namespace { status.signal_applied_transaction(trace, txn); } - status.signal_accepted_block(bs->block, bs->id); + status.signal_accepted_block(b, b->calculate_id()); } void send_spec_block() { @@ -914,11 +879,11 @@ namespace { } private: - void verify(const trx_deque& trx_pairs, const chain::block_state_legacy_ptr& bs, uint32_t begin, uint32_t end) { + void verify(const trx_deque& trx_pairs, const chain::signed_block_ptr& b, uint32_t begin, uint32_t end) { if (end == std::numeric_limits::max()) { end = block.size(); } - const auto id = bs ? bs->id : eosio::chain::transaction_id_type{}; + const auto id = b ? b->calculate_id() : eosio::chain::transaction_id_type{}; for (auto i = begin; i < end; ++i) { const auto& trx_pair = trx_pairs[i]; std::string msg = context + ": block_num==" + std::to_string(bn) + ", i==" + std::to_string(i) + ", id: " + std::string(std::get<1>(trx_pair)->id()); @@ -950,15 +915,6 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_storage_reduction) { try { const uint64_t max_storage = 10'000; trx_finality_status_processing status(max_storage, max_success_duration, max_failure_duration); - // auto verify_trx = [&status](trx_deque& trx_pairs, const eosio::chain::block_state_ptr& bs) { - // const auto id = bs ? bs->id : eosio::chain::transaction_id_type{}; - // for (const auto& trx_pair : trx_pairs) { - // auto ts = status.get_trx_state(std::get<1>(trx_pair)->id()); - // BOOST_REQUIRE(ts); - // BOOST_CHECK(ts->block_id == id); - // } - // }; - block_frame b_01(status, "04:44:00.500", 1); b_01.send_spec_block(); b_01.verify_spec_block(); @@ -1054,9 +1010,9 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_storage_reduction) { try { auto cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == b_11.bs->id); + BOOST_CHECK(cs.head_id == b_11.b->calculate_id()); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == b_01.bs->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_01.b->calculate_id()); // Test expects the next block range to exceed max_storage. Need to adjust // this test if this fails. @@ -1071,11 +1027,11 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_storage_reduction) { try { b_12.verify_block(); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == b_12.bs->id); - BOOST_CHECK(cs.head_block_timestamp == b_12.bs->block->timestamp); + BOOST_CHECK(cs.head_id == b_12.b->calculate_id()); + BOOST_CHECK(cs.head_block_timestamp == b_12.b->timestamp); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); BOOST_CHECK(cs.irr_block_timestamp == eosio::chain::block_timestamp_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == b_03.bs->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_03.b->calculate_id()); b_01.verify_spec_block_not_there(); @@ -1124,16 +1080,6 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_lifespan) { try { const uint64_t max_storage = 10'000; trx_finality_status_processing status(max_storage, max_success_duration, max_failure_duration); - // auto verify_trx = [&status](trx_deque& trx_pairs, const eosio::chain::block_state_ptr& bs) { - // const auto id = bs ? 
bs->id : eosio::chain::transaction_id_type{}; - // for (const auto& trx_pair : trx_pairs) { - // auto ts = status.get_trx_state(std::get<1>(trx_pair)->id()); - // BOOST_REQUIRE(ts); - // BOOST_CHECK(ts->block_id == id); - // } - // }; - - block_frame b_01(status, "04:44:00.500", 1); b_01.send_spec_block(); b_01.verify_spec_block(); @@ -1191,9 +1137,9 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_lifespan) { try { b_01.verify_spec_block(); auto cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == b_06.bs->id); + BOOST_CHECK(cs.head_id == b_06.b->calculate_id()); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == b_02.bs->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_02.b->calculate_id()); block_frame b_07(status, "04:44:30.500"); @@ -1210,9 +1156,9 @@ BOOST_AUTO_TEST_CASE(trx_finality_status_lifespan) { try { b_02.verify_spec_block(); cs = status.get_chain_state(); - BOOST_CHECK(cs.head_id == b_07.bs->id); + BOOST_CHECK(cs.head_id == b_07.b->calculate_id()); BOOST_CHECK(cs.irr_id == eosio::chain::block_id_type{}); - BOOST_CHECK(cs.earliest_tracked_block_id == b_03.bs->id); + BOOST_CHECK(cs.earliest_tracked_block_id == b_03.b->calculate_id()); block_frame b_08(status, "04:44:35.500"); diff --git a/plugins/chain_plugin/test/test_trx_retry_db.cpp b/plugins/chain_plugin/test/test_trx_retry_db.cpp index f9810b30bd..a036f5a36f 100644 --- a/plugins/chain_plugin/test/test_trx_retry_db.cpp +++ b/plugins/chain_plugin/test/test_trx_retry_db.cpp @@ -135,7 +135,7 @@ uint64_t get_id( const packed_transaction_ptr& ptr ) { return get_id( ptr->get_transaction() ); } -auto make_block_state( uint32_t block_num, std::vector trxs ) { +auto make_block( uint32_t block_num, std::vector trxs ) { name producer = "kevinh"_n; chain::signed_block_ptr block = std::make_shared(); for( auto& trx : trxs ) { @@ -152,42 +152,11 @@ auto make_block_state( uint32_t block_num, std::vectorproducer, "active" ); auto pub_key = get_public_key( block->producer, "active" ); - auto prev = std::make_shared(); - auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), prev->blockroot_merkle.get_root())); - auto sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, prev->pending_schedule.schedule_hash )); + auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), digest_type{})); + auto sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, digest_type{} )); block->producer_signature = priv_key.sign( sig_digest ); - std::vector signing_keys; - signing_keys.emplace_back( priv_key ); - auto signer = [&]( chain::digest_type d ) { - std::vector result; - result.reserve( signing_keys.size()); - for( const auto& k: signing_keys ) - result.emplace_back( k.sign( d )); - return result; - }; - chain::pending_block_header_state_legacy pbhs; - pbhs.producer = block->producer; - pbhs.timestamp = block->timestamp; - pbhs.previous = block->previous; - chain::producer_authority_schedule schedule = - {0, {chain::producer_authority{block->producer, - chain::block_signing_authority_v0{1, {{pub_key, 1}}}}}}; - pbhs.active_schedule = schedule; - pbhs.valid_block_signing_authority = chain::block_signing_authority_v0{1, {{pub_key, 1}}}; - auto bsp = std::make_shared( - std::move( pbhs ), - std::move( block ), - deque(), - chain::protocol_feature_set(), - []( chain::block_timestamp_type timestamp, - const fc::flat_set& cur_features, - const std::vector& new_features ) {}, - signer - ); - bsp->block_num = block_num; - - 
return bsp; + return block; } } // anonymous namespace @@ -274,30 +243,30 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { trx_2_expired = true; } ); // signal block, nothing should be expired as now has not changed - auto bsp1 = make_block_state(1, {}); + auto bp1 = make_block(1, {}); trx_retry.on_block_start(1); - trx_retry.on_accepted_block(bsp1->block_num); - trx_retry.on_irreversible_block(bsp1->block); + trx_retry.on_accepted_block(bp1->block_num()); + trx_retry.on_irreversible_block(bp1); BOOST_CHECK(!trx_1_expired); BOOST_CHECK(!trx_2_expired); // increase time by 3 seconds to expire first pnow += boost::posix_time::seconds(3); fc::mock_time_traits::set_now(pnow); // signal block, first transaction should expire - auto bsp2 = make_block_state(2, {}); + auto bp2 = make_block(2, {}); trx_retry.on_block_start(2); - trx_retry.on_accepted_block(bsp2->block_num); - trx_retry.on_irreversible_block(bsp2->block); + trx_retry.on_accepted_block(bp2->block_num()); + trx_retry.on_irreversible_block(bp2); BOOST_CHECK(trx_1_expired); BOOST_CHECK(!trx_2_expired); // increase time by 2 seconds to expire second pnow += boost::posix_time::seconds(2); fc::mock_time_traits::set_now(pnow); // signal block, second transaction should expire - auto bsp3 = make_block_state(3, {}); + auto bp3 = make_block(3, {}); trx_retry.on_block_start(3); - trx_retry.on_accepted_block(bsp3->block_num); - trx_retry.on_irreversible_block(bsp3->block); + trx_retry.on_accepted_block(bp3->block_num()); + trx_retry.on_irreversible_block(bp3); BOOST_CHECK(trx_1_expired); BOOST_CHECK(trx_2_expired); BOOST_CHECK_EQUAL(0u, trx_retry.size()); @@ -326,18 +295,18 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { pnow += (pretry_interval - boost::posix_time::seconds(1)); fc::mock_time_traits::set_now(pnow); // signal block, transaction 3 should be sent - auto bsp4 = make_block_state(4, {}); + auto bp4 = make_block(4, {}); trx_retry.on_block_start(4); - trx_retry.on_accepted_block(bsp4->block_num); + trx_retry.on_accepted_block(bp4->block_num()); BOOST_CHECK( get_id(transactions_acked.pop().second) == 3 ); BOOST_CHECK_EQUAL( 0u, transactions_acked.size() ); // increase time by 1 seconds, so trx_4 is sent pnow += boost::posix_time::seconds(1); fc::mock_time_traits::set_now(pnow); // signal block, transaction 4 should be sent - auto bsp5 = make_block_state(5, {}); + auto bp5 = make_block(5, {}); trx_retry.on_block_start(5); - trx_retry.on_accepted_block(bsp5->block_num); + trx_retry.on_accepted_block(bp5->block_num()); BOOST_CHECK( get_id(transactions_acked.pop().second) == 4 ); BOOST_CHECK_EQUAL( 0u, transactions_acked.size() ); BOOST_CHECK(!trx_3_expired); @@ -345,12 +314,12 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { // go ahead and expire them now pnow += boost::posix_time::seconds(30); fc::mock_time_traits::set_now(pnow); - auto bsp6 = make_block_state(6, {}); + auto bp6 = make_block(6, {}); trx_retry.on_block_start(6); - trx_retry.on_accepted_block(bsp6->block_num); - trx_retry.on_irreversible_block(bsp4->block); - trx_retry.on_irreversible_block(bsp5->block); - trx_retry.on_irreversible_block(bsp6->block); + trx_retry.on_accepted_block(bp6->block_num()); + trx_retry.on_irreversible_block(bp4); + trx_retry.on_irreversible_block(bp5); + trx_retry.on_irreversible_block(bp6); BOOST_CHECK(trx_3_expired); BOOST_CHECK(trx_4_expired); BOOST_CHECK_EQUAL(0u, trx_retry.size()); @@ -376,9 +345,9 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { trx_6_variant = true; } ); // not in block 7, so not returned to user - auto bsp7 = make_block_state(7, {}); + auto bp7 = 
make_block(7, {}); trx_retry.on_block_start(7); - trx_retry.on_accepted_block(bsp7->block_num); + trx_retry.on_accepted_block(bp7->block_num()); BOOST_CHECK(!trx_5_variant); BOOST_CHECK(!trx_6_variant); // 5,6 in block 8 @@ -389,37 +358,37 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { auto trace_6 = make_transaction_trace( trx_6, 8); trx_retry.on_applied_transaction(trace_5, trx_5); trx_retry.on_applied_transaction(trace_6, trx_6); - auto bsp8 = make_block_state(8, {trx_5, trx_6}); - trx_retry.on_accepted_block(bsp8->block_num); + auto bp8 = make_block(8, {trx_5, trx_6}); + trx_retry.on_accepted_block(bp8->block_num()); BOOST_CHECK(!trx_5_variant); BOOST_CHECK(!trx_6_variant); // need 2 blocks before 6 returned to user pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); - auto bsp9 = make_block_state(9, {}); + auto bp9 = make_block(9, {}); trx_retry.on_block_start(9); - trx_retry.on_accepted_block(bsp9->block_num); + trx_retry.on_accepted_block(bp9->block_num()); BOOST_CHECK(!trx_5_variant); BOOST_CHECK(!trx_6_variant); pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); - auto bsp10 = make_block_state(10, {}); + auto bp10 = make_block(10, {}); trx_retry.on_block_start(10); - trx_retry.on_accepted_block(bsp10->block_num); + trx_retry.on_accepted_block(bp10->block_num()); BOOST_CHECK(!trx_5_variant); BOOST_CHECK(trx_6_variant); // now signal lib for trx_6 pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); - auto bsp11 = make_block_state(11, {}); + auto bp11 = make_block(11, {}); trx_retry.on_block_start(11); - trx_retry.on_accepted_block(bsp11->block_num); + trx_retry.on_accepted_block(bp11->block_num()); BOOST_CHECK(!trx_5_variant); BOOST_CHECK(trx_6_variant); - trx_retry.on_irreversible_block(bsp7->block); + trx_retry.on_irreversible_block(bp7); BOOST_CHECK(!trx_5_variant); BOOST_CHECK(trx_6_variant); - trx_retry.on_irreversible_block(bsp8->block); + trx_retry.on_irreversible_block(bp8); BOOST_CHECK(trx_5_variant); BOOST_CHECK(trx_6_variant); BOOST_CHECK_EQUAL(0u, trx_retry.size()); @@ -454,9 +423,9 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { } ); // not in block 12 - auto bsp12 = make_block_state(12, {}); + auto bp12 = make_block(12, {}); trx_retry.on_block_start(12); - trx_retry.on_accepted_block(bsp12->block_num); + trx_retry.on_accepted_block(bp12->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(!trx_8_variant); BOOST_CHECK(!trx_9_expired); @@ -470,25 +439,25 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { trx_retry.on_applied_transaction(trace_7, trx_7); trx_retry.on_applied_transaction(trace_8, trx_8); trx_retry.on_applied_transaction(trace_9, trx_9); - auto bsp13 = make_block_state(13, {trx_7, trx_8, trx_9}); - trx_retry.on_accepted_block(bsp13->block_num); + auto bp13 = make_block(13, {trx_7, trx_8, trx_9}); + trx_retry.on_accepted_block(bp13->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(!trx_8_variant); BOOST_CHECK(!trx_9_expired); // need 3 blocks before 8 returned to user pnow += boost::posix_time::seconds(1); // new block, new time, 1st block fc::mock_time_traits::set_now(pnow); - auto bsp14 = make_block_state(14, {}); + auto bp14 = make_block(14, {}); trx_retry.on_block_start(14); - trx_retry.on_accepted_block(bsp14->block_num); + trx_retry.on_accepted_block(bp14->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(!trx_8_variant); BOOST_CHECK(!trx_9_expired); pnow += boost::posix_time::seconds(1); // new block, new time, 2nd 
block fc::mock_time_traits::set_now(pnow); - auto bsp15 = make_block_state(15, {}); + auto bp15 = make_block(15, {}); trx_retry.on_block_start(15); - trx_retry.on_accepted_block(bsp15->block_num); + trx_retry.on_accepted_block(bp15->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(!trx_8_variant); BOOST_CHECK(!trx_9_expired); @@ -499,85 +468,85 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { // should still be tracking them BOOST_CHECK_EQUAL(3u, trx_retry.size()); // now produce an empty 13 - auto bsp13b = make_block_state(13, {}); // now 13 has no traces - trx_retry.on_accepted_block(bsp13b->block_num); + auto bp13b = make_block(13, {}); // now 13 has no traces + trx_retry.on_accepted_block(bp13b->block_num()); // produced another empty block pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); trx_retry.on_block_start(14); // now produce an empty 14 - auto bsp14b = make_block_state(14, {}); // empty - trx_retry.on_accepted_block(bsp14b->block_num); + auto bp14b = make_block(14, {}); // empty + trx_retry.on_accepted_block(bp14b->block_num()); // produce block with 7,8 trx_retry.on_block_start(15); auto trace_7b = make_transaction_trace( trx_7, 15); auto trace_8b = make_transaction_trace( trx_8, 15); trx_retry.on_applied_transaction(trace_7b, trx_7); trx_retry.on_applied_transaction(trace_8b, trx_8); - auto bsp15b = make_block_state(15, {trx_7, trx_8}); - trx_retry.on_accepted_block(bsp15b->block_num); + auto bp15b = make_block(15, {trx_7, trx_8}); + trx_retry.on_accepted_block(bp15b->block_num()); // need 3 blocks before 8 returned to user pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); - auto bsp16 = make_block_state(16, {}); + auto bp16 = make_block(16, {}); trx_retry.on_block_start(16); - trx_retry.on_accepted_block(bsp16->block_num); + trx_retry.on_accepted_block(bp16->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(!trx_8_variant); BOOST_CHECK(!trx_9_expired); pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); - auto bsp17 = make_block_state(17, {}); + auto bp17 = make_block(17, {}); trx_retry.on_block_start(17); - trx_retry.on_accepted_block(bsp17->block_num); + trx_retry.on_accepted_block(bp17->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(!trx_8_variant); BOOST_CHECK(!trx_9_expired); pnow += boost::posix_time::seconds(1); // new block, new time, 3rd one fc::mock_time_traits::set_now(pnow); - auto bsp18 = make_block_state(18, {}); + auto bp18 = make_block(18, {}); trx_retry.on_block_start(18); - trx_retry.on_accepted_block(bsp18->block_num); + trx_retry.on_accepted_block(bp18->block_num()); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(trx_8_variant); BOOST_CHECK(!trx_9_expired); - trx_retry.on_irreversible_block(bsp9->block); - trx_retry.on_irreversible_block(bsp10->block); - trx_retry.on_irreversible_block(bsp11->block); - trx_retry.on_irreversible_block(bsp12->block); - trx_retry.on_irreversible_block(bsp13b->block); - trx_retry.on_irreversible_block(bsp14b->block); + trx_retry.on_irreversible_block(bp9); + trx_retry.on_irreversible_block(bp10); + trx_retry.on_irreversible_block(bp11); + trx_retry.on_irreversible_block(bp12); + trx_retry.on_irreversible_block(bp13b); + trx_retry.on_irreversible_block(bp14b); BOOST_CHECK(!trx_7_variant); BOOST_CHECK(trx_8_variant); BOOST_CHECK(!trx_9_expired); - trx_retry.on_irreversible_block(bsp15b->block); + trx_retry.on_irreversible_block(bp15b); BOOST_CHECK(trx_7_variant); 
BOOST_CHECK(trx_8_variant); BOOST_CHECK(!trx_9_expired); // verify trx_9 expires pnow += boost::posix_time::seconds(21); // new block, new time, before expire fc::mock_time_traits::set_now(pnow); - auto bsp19 = make_block_state(19, {}); + auto bp19 = make_block(19, {}); trx_retry.on_block_start(19); - trx_retry.on_accepted_block(bsp19->block_num); - trx_retry.on_irreversible_block(bsp15->block); - trx_retry.on_irreversible_block(bsp16->block); - trx_retry.on_irreversible_block(bsp17->block); - trx_retry.on_irreversible_block(bsp18->block); - trx_retry.on_irreversible_block(bsp19->block); + trx_retry.on_accepted_block(bp19->block_num()); + trx_retry.on_irreversible_block(bp15); + trx_retry.on_irreversible_block(bp16); + trx_retry.on_irreversible_block(bp17); + trx_retry.on_irreversible_block(bp18); + trx_retry.on_irreversible_block(bp19); BOOST_CHECK(trx_7_variant); BOOST_CHECK(trx_8_variant); BOOST_CHECK(!trx_9_expired); pnow += boost::posix_time::seconds(1); // new block, new time, trx_9 now expired fc::mock_time_traits::set_now(pnow); - auto bsp20 = make_block_state(20, {}); + auto bp20 = make_block(20, {}); trx_retry.on_block_start(20); - trx_retry.on_accepted_block(bsp20->block_num); + trx_retry.on_accepted_block(bp20->block_num()); // waits for LIB BOOST_CHECK(trx_7_variant); BOOST_CHECK(trx_8_variant); BOOST_CHECK(!trx_9_expired); - trx_retry.on_irreversible_block(bsp20->block); + trx_retry.on_irreversible_block(bp20); BOOST_CHECK(trx_7_variant); BOOST_CHECK(trx_8_variant); BOOST_CHECK(trx_9_expired); @@ -606,15 +575,15 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) { auto trace_11 = make_transaction_trace( trx_11, 21); trx_retry.on_applied_transaction(trace_10, trx_10); trx_retry.on_applied_transaction(trace_11, trx_11); - auto bsp21 = make_block_state(21, {trx_10, trx_11}); - trx_retry.on_accepted_block(bsp21->block_num); + auto bp21 = make_block(21, {trx_10, trx_11}); + trx_retry.on_accepted_block(bp21->block_num()); BOOST_CHECK(trx_10_variant); BOOST_CHECK(!trx_11_variant); pnow += boost::posix_time::seconds(1); // new block, new time fc::mock_time_traits::set_now(pnow); - auto bsp22 = make_block_state(22, {}); + auto bp22 = make_block(22, {}); trx_retry.on_block_start(22); - trx_retry.on_accepted_block(bsp22->block_num); + trx_retry.on_accepted_block(bp22->block_num()); BOOST_CHECK(trx_10_variant); BOOST_CHECK(trx_11_variant); BOOST_CHECK_EQUAL(0u, trx_retry.size()); diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8fdc8bceb2..bf8f7d0a07 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -3718,7 +3718,7 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); // may have come in on a different connection and posted into dispatcher strand before this one - if( my_impl->dispatcher->have_block( id ) || cc.fetch_block_state_by_id( id ) ) { // thread-safe + if( my_impl->dispatcher->have_block( id ) || cc.fetch_block_by_id( id ) ) { // thread-safe my_impl->dispatcher->add_peer_block( id, c->connection_id ); c->strand.post( [c, id]() { my_impl->sync_master->sync_recv_block( c, id, block_header::num_from_id(id), false ); @@ -3749,13 +3749,13 @@ namespace eosio { } - uint32_t block_num = bsp ? bsp->block_num : 0; + uint32_t block_num = bsp ? 
bsp->block_num() : 0; if( block_num != 0 ) { fc_dlog( logger, "validated block header, broadcasting immediately, connection ${cid}, blk num = ${num}, id = ${id}", - ("cid", cid)("num", block_num)("id", bsp->id) ); - my_impl->dispatcher->add_peer_block( bsp->id, cid ); // no need to send back to sender - my_impl->dispatcher->bcast_block( bsp->block, bsp->id ); + ("cid", cid)("num", block_num)("id", bsp->id()) ); + my_impl->dispatcher->add_peer_block( bsp->id(), cid ); // no need to send back to sender + my_impl->dispatcher->bcast_block( bsp->block, bsp->id() ); } app().executor().post(priority::medium, exec_queue::read_write, [ptr{std::move(ptr)}, bsp{std::move(bsp)}, id, c{std::move(c)}]() mutable { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 36f36ccbba..d64884a2a4 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -734,9 +734,8 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.timestamp.next().to_time_point() >= now) { + if (chain.head_block_timestamp().next().to_time_point() >= now) { _production_enabled = true; } @@ -747,13 +746,15 @@ class producer_plugin_impl : public std::enable_shared_from_thistransactions.size())("lib", chain.last_irreversible_block_num()) ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) ("elapsed", br.total_elapsed_time)("time", br.total_time)("latency", (now - block->timestamp).count() / 1000)); - if (chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr) { // not applied to head + const auto& hb_id = chain.head_block_id(); + const auto& hb = chain.head_block(); + if (chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hb && hb_id != id && hb != nullptr) { // not applied to head ilog("Block not applied to head ${id}... 
#${n} @ ${t} signed by ${p} " "[trxs: ${count}, lib: ${lib}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p", hbs->block->producer)("id", hbs->id.str().substr(8, 16))("n", hbs->block_num)("t", hbs->block->timestamp) - ("count", hbs->block->transactions.size())("lib", chain.last_irreversible_block_num()) + ("p", hb->producer)("id", hb_id.str().substr(8, 16))("n", hb->block_num())("t", hb->timestamp) + ("count", hb->transactions.size())("lib", chain.last_irreversible_block_num()) ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - hbs->block->timestamp).count() / 1000)); + ("latency", (now - hb->timestamp).count() / 1000)); } } if (_update_incoming_block_metrics) { @@ -1014,14 +1015,10 @@ void new_chain_banner(const eosio::chain::controller& db) "*******************************\n" "\n"; - if( db.head_block_state()->header.timestamp.to_time_point() < (fc::time_point::now() - fc::milliseconds(200 * config::block_interval_ms))) - { + if( db.head_block_time() < (fc::time_point::now() - fc::milliseconds(200 * config::block_interval_ms))) { std::cerr << "Your genesis seems to have an old timestamp\n" - "Please consider using the --genesis-timestamp option to give your genesis a recent timestamp\n" - "\n" - ; + "Please consider using the --genesis-timestamp option to give your genesis a recent timestamp\n\n"; } - return; } producer_plugin::producer_plugin() @@ -1776,9 +1773,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (!chain_plug->accept_transactions()) return start_block_result::waiting_for_block; - const auto& hbs = chain.head_block_state(); + uint32_t head_block_num = chain.head_block_num(); - if (chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() <= chain.head_block_num()) { + if (chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() <= head_block_num) { ilog("Reached configured maximum block ${num}; terminating", ("num", chain.get_terminate_at_block())); app().quit(); return start_block_result::failed; @@ -1786,12 +1783,12 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const fc::time_point now = fc::time_point::now(); const block_timestamp_type block_time = calculate_pending_block_time(); - const uint32_t pending_block_num = hbs->block_num + 1; + const uint32_t pending_block_num = head_block_num + 1; _pending_block_mode = pending_block_mode::producing; // Not our turn - const auto& scheduled_producer = hbs->get_scheduled_producer(block_time); + const auto scheduled_producer = chain.active_producers().get_scheduled_producer(block_time); const auto current_watermark = _producer_watermarks.get_watermark(scheduled_producer.producer_name); @@ -1827,10 +1824,10 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { // determine if our watermark excludes us from producing at this point if (current_watermark) { const block_timestamp_type block_timestamp{block_time}; - if (current_watermark->first > hbs->block_num) { + if (current_watermark->first > head_block_num) { elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) than the current " "fork's head (${head_block_num})", - ("producer", scheduled_producer.producer_name)("watermark", current_watermark->first)("head_block_num", hbs->block_num)); + ("producer", scheduled_producer.producer_name)("watermark", 
current_watermark->first)("head_block_num", head_block_num)); _pending_block_mode = pending_block_mode::speculating; } else if (current_watermark->second >= block_timestamp) { elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending " @@ -1881,7 +1878,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { try { uint16_t blocks_to_confirm = 0; - if (in_producing_mode() && hbs->dpos_irreversible_blocknum != hs_dpos_irreversible_blocknum) { // only if hotstuff not enabled + auto block_state = chain.head_block_state_legacy(); // null means if is active + if (in_producing_mode() && block_state && block_state->dpos_irreversible_blocknum != hs_dpos_irreversible_blocknum) { // only if hotstuff not enabled // determine how many blocks this producer can confirm // 1) if it is not a producer from this node, assume no confirmations (we will discard this block anyway) // 2) if it is a producer on this node that has never produced, the conservative approach is to assume no @@ -1889,14 +1887,14 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { // 3) if it is a producer on this node where this node knows the last block it produced, safely set it -UNLESS- // 4) the producer on this node's last watermark is higher (meaning on a different fork) if (current_watermark) { - auto watermark_bn = current_watermark->first; - if (watermark_bn < hbs->block_num) { - blocks_to_confirm = (uint16_t)(std::min(std::numeric_limits::max(), (uint32_t)(hbs->block_num - watermark_bn))); + uint32_t watermark_bn = current_watermark->first; + if (watermark_bn < head_block_num) { + blocks_to_confirm = (uint16_t)(std::min(std::numeric_limits::max(), (head_block_num - watermark_bn))); } } // can not confirm irreversible blocks - blocks_to_confirm = (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - hbs->dpos_irreversible_blocknum))); + blocks_to_confirm = (uint16_t)(std::min(blocks_to_confirm, (head_block_num - block_state->dpos_irreversible_blocknum))); } abort_block(); @@ -1949,7 +1947,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { LOG_AND_DROP(); if (chain.is_building_block()) { - const auto& pending_block_signing_authority = chain.pending_block_signing_authority(); + auto pending_block_signing_authority = chain.pending_block_signing_authority(); if (in_producing_mode() && pending_block_signing_authority != scheduled_producer.authority) { elog("Unexpected block signing authority, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual\"", @@ -1959,7 +1957,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { try { chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); - _account_fails.report_and_clear(hbs->block_num, subjective_bill); + _account_fails.report_and_clear(pending_block_num, subjective_bill); if (!remove_expired_trxs(preprocess_deadline)) return start_block_result::exhausted; @@ -2498,7 +2496,7 @@ void producer_plugin_impl::schedule_production_loop() { chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change"); auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort, chain.head_block_num(), calculate_pending_block_time(), - _producers, chain.head_block_state()->active_schedule.producers, + _producers, chain.active_producers().producers, _producer_watermarks); schedule_delayed_production_loop(weak_from_this(), wake_time); } else { @@ -2517,7 +2515,7 @@ void producer_plugin_impl::schedule_production_loop() { fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state"); auto wake_time = block_timing_util::calculate_producer_wake_up_time(fc::microseconds{config::block_interval_us}, chain.pending_block_num(), chain.pending_block_timestamp(), - _producers, chain.head_block_state()->active_schedule.producers, + _producers, chain.active_producers().producers, _producer_watermarks); if (wake_time && fc::time_point::now() > *wake_time) { // if wake time has already passed then use the block deadline instead @@ -2623,7 +2621,7 @@ void producer_plugin_impl::produce_block() { EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); - const auto& auth = chain.pending_block_signing_authority(); + auto auth = chain.pending_block_signing_authority(); std::vector> relevant_providers; relevant_providers.reserve(_signature_providers.size()); @@ -2659,26 +2657,27 @@ void producer_plugin_impl::produce_block() { chain.commit_block(); - block_state_legacy_ptr new_bs = chain.head_block_state(); + const auto& id = chain.head_block_id(); + const auto& new_b = chain.head_block(); producer_plugin::produced_block_metrics metrics; br.total_time += fc::time_point::now() - start; ilog("Produced block ${id}... 
#${n} @ ${t} signed by ${p} " "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${et}, time: ${tt}]", - ("p", new_bs->header.producer)("id", new_bs->id.str().substr(8, 16))("n", new_bs->block_num)("t", new_bs->header.timestamp) - ("count", new_bs->block->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage) - ("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_bs->header.confirmed)); + ("p", new_b->producer)("id", id.str().substr(8, 16))("n", new_b->block_num())("t", new_b->timestamp) + ("count", new_b->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage) + ("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_b->confirmed)); _time_tracker.add_other_time(); - _time_tracker.report(new_bs->block_num, new_bs->block->producer, metrics); + _time_tracker.report(new_b->block_num(), new_b->producer, metrics); _time_tracker.clear(); if (_update_produced_block_metrics) { metrics.unapplied_transactions_total = _unapplied_transactions.size(); metrics.subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(); metrics.scheduled_trxs_total = chain.db().get_index().size(); - metrics.trxs_produced_total = new_bs->block->transactions.size(); + metrics.trxs_produced_total = new_b->transactions.size(); metrics.cpu_usage_us = br.total_cpu_usage_us; metrics.total_elapsed_time_us = br.total_elapsed_time.count(); metrics.total_time_us = br.total_time.count(); diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index c40a695e2f..ba0ae54ea6 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -206,7 +206,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this(*block), block->block_num()); + store_chain_state(id, block->previous, block->block_num()); } catch (const fc::exception& e) { fc_elog(_log, "fc::exception: ${details}", ("details", e.to_detail_string())); // Both app().quit() and exception throwing are required. 
Without app().quit(), @@ -256,7 +256,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisempty(); @@ -265,7 +265,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thispack_and_write_entry(header, block_header.previous, [this, fresh](auto&& buf) { + chain_state_log->pack_and_write_entry(header, previous_id, [this, fresh](auto&& buf) { pack_deltas(buf, chain_plug->chain().db(), fresh); }); } // store_chain_state @@ -406,10 +406,10 @@ void state_history_plugin_impl::plugin_startup() { try { const auto& chain = chain_plug->chain(); update_current(); - auto bsp = chain.head_block_state(); - if( bsp && chain_state_log && chain_state_log->empty() ) { + uint32_t block_num = chain.head_block_num(); + if( block_num > 0 && chain_state_log && chain_state_log->empty() ) { fc_ilog( _log, "Storing initial state on startup, this can take a considerable amount of time" ); - store_chain_state( bsp->id, bsp->header, bsp->block_num ); + store_chain_state( chain.head_block_id(), chain.head_block_header().previous, block_num ); fc_ilog( _log, "Done storing initial state on startup" ); } first_available_block = chain.earliest_available_block_num(); diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index 2bf43bdb55..80543c9942 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -62,10 +62,10 @@ void test_control_plugin_impl::process_next_block_state(const chain::block_id_ty // Tests expect the shutdown only after signaling a producer shutdown and seeing a full production cycle const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us); // have to fetch bsp due to get_scheduled_producer call - const auto& bsp = _chain.fetch_block_state_by_id(id); - const auto& producer_authority = bsp->get_scheduled_producer(block_time); + + const auto& producer_authority = _chain.active_producers().get_scheduled_producer(block_time); const auto producer_name = producer_authority.producer_name; - const auto slot = bsp->block->timestamp.slot % chain::config::producer_repetitions; + const auto slot = _chain.head_block_timestamp().slot % chain::config::producer_repetitions; if (_producer != account_name()) { if( _producer != producer_name ) _clean_producer_sequence = true; if( _clean_producer_sequence ) { diff --git a/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp b/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp index a38b034036..371de9709d 100644 --- a/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp +++ b/plugins/trace_api_plugin/test/include/eosio/trace_api/test_common.hpp @@ -53,8 +53,8 @@ namespace eosio::trace_api { return result; } - inline auto make_block_state( chain::block_id_type previous, uint32_t height, uint32_t slot, chain::name producer, - std::vector trxs ) { + inline auto make_block( chain::block_id_type previous, uint32_t height, uint32_t slot, chain::name producer, + std::vector trxs ) { chain::signed_block_ptr block = std::make_shared(); for( auto& trx : trxs ) { block->transactions.emplace_back( trx ); @@ -71,40 +71,11 @@ namespace eosio::trace_api { auto priv_key = get_private_key( block->producer, "active" ); auto pub_key = get_public_key( block->producer, "active" ); - auto prev = std::make_shared(); - auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), prev->blockroot_merkle.get_root())); - auto 
sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, prev->pending_schedule.schedule_hash )); + auto header_bmroot = chain::digest_type::hash( std::make_pair( block->digest(), chain::digest_type{})); + auto sig_digest = chain::digest_type::hash( std::make_pair( header_bmroot, chain::digest_type{} )); block->producer_signature = priv_key.sign( sig_digest ); - std::vector signing_keys; - signing_keys.emplace_back( std::move( priv_key )); - auto signer = [&]( chain::digest_type d ) { - std::vector result; - result.reserve( signing_keys.size()); - for( const auto& k: signing_keys ) - result.emplace_back( k.sign( d )); - return result; - }; - chain::pending_block_header_state_legacy pbhs; - pbhs.producer = block->producer; - pbhs.timestamp = block->timestamp; - chain::producer_authority_schedule schedule = {0, {chain::producer_authority{block->producer, - chain::block_signing_authority_v0{1, {{pub_key, 1}}}}}}; - pbhs.active_schedule = schedule; - pbhs.valid_block_signing_authority = chain::block_signing_authority_v0{1, {{pub_key, 1}}}; - auto bsp = std::make_shared( - std::move( pbhs ), - std::move( block ), - eosio::chain::deque(), - chain::protocol_feature_set(), - []( chain::block_timestamp_type timestamp, - const fc::flat_set& cur_features, - const std::vector& new_features ) {}, - signer - ); - bsp->block_num = height; - - return bsp; + return block; } inline void to_kv_helper(const fc::variant& v, std::function&& append){ diff --git a/plugins/trace_api_plugin/test/test_extraction.cpp b/plugins/trace_api_plugin/test/test_extraction.cpp index e053ce6c52..3c6fbe89ce 100644 --- a/plugins/trace_api_plugin/test/test_extraction.cpp +++ b/plugins/trace_api_plugin/test/test_extraction.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include @@ -136,8 +135,8 @@ struct extraction_test_fixture { extraction_impl.signal_applied_transaction(trace, ptrx); } - void signal_accepted_block( const chain::block_state_legacy_ptr& bsp ) { - extraction_impl.signal_accepted_block(bsp->block, bsp->id); + void signal_accepted_block( const chain::signed_block_ptr& bp ) { + extraction_impl.signal_accepted_block(bp, bp->calculate_id()); } // fixture data and methods @@ -168,10 +167,10 @@ BOOST_AUTO_TEST_SUITE(block_extraction) std::make_shared(ptrx1) ); // accept the block with one transaction - auto bsp1 = make_block_state( chain::block_id_type(), 1, 1, "bp.one"_n, + auto bp1 = make_block( chain::block_id_type(), 1, 1, "bp.one"_n, { chain::packed_transaction(ptrx1) } ); - signal_accepted_block( bsp1 ); - + signal_accepted_block( bp1 ); + const std::vector expected_action_traces { { { @@ -206,23 +205,23 @@ BOOST_AUTO_TEST_SUITE(block_extraction) { ptrx1.id(), expected_action_traces, - fc::enum_type{bsp1->block->transactions[0].status}, - bsp1->block->transactions[0].cpu_usage_us, - bsp1->block->transactions[0].net_usage_words, + fc::enum_type{bp1->transactions[0].status}, + bp1->transactions[0].cpu_usage_us, + bp1->transactions[0].net_usage_words, ptrx1.get_signatures(), make_trx_header(ptrx1.get_transaction()) } }; const block_trace_v2 expected_block_trace { - bsp1->id, + bp1->calculate_id(), 1, - bsp1->prev(), + bp1->previous, chain::block_timestamp_type(1), "bp.one"_n, - bsp1->block->transaction_mroot, - bsp1->block->action_mroot, - bsp1->block->schedule_version, + bp1->transaction_mroot, + bp1->action_mroot, + bp1->schedule_version, std::vector { expected_transaction_trace } @@ -232,7 +231,7 @@ BOOST_AUTO_TEST_SUITE(block_extraction) BOOST_REQUIRE(data_log.size() == 1u); 
BOOST_REQUIRE(std::holds_alternative(data_log.at(0))); BOOST_REQUIRE_EQUAL(std::get(data_log.at(0)), expected_block_trace); - BOOST_REQUIRE_EQUAL(id_log.at(bsp1->block_num).size(), bsp1->block->transactions.size()); + BOOST_REQUIRE_EQUAL(id_log.at(bp1->block_num()).size(), bp1->transactions.size()); } BOOST_FIXTURE_TEST_CASE(basic_multi_transaction_block, extraction_test_fixture) { @@ -260,9 +259,9 @@ BOOST_AUTO_TEST_SUITE(block_extraction) std::make_shared( ptrx3 ) ); // accept the block with three transaction - auto bsp1 = make_block_state( chain::block_id_type(), 1, 1, "bp.one"_n, + auto bp1 = make_block( chain::block_id_type(), 1, 1, "bp.one"_n, { chain::packed_transaction(ptrx1), chain::packed_transaction(ptrx2), chain::packed_transaction(ptrx3) } ); - signal_accepted_block( bsp1 ); + signal_accepted_block( bp1 ); const std::vector expected_action_trace1 { { @@ -305,9 +304,9 @@ BOOST_AUTO_TEST_SUITE(block_extraction) { ptrx1.id(), expected_action_trace1, - fc::enum_type{bsp1->block->transactions[0].status}, - bsp1->block->transactions[0].cpu_usage_us, - bsp1->block->transactions[0].net_usage_words, + fc::enum_type{bp1->transactions[0].status}, + bp1->transactions[0].cpu_usage_us, + bp1->transactions[0].net_usage_words, ptrx1.get_signatures(), make_trx_header(ptrx1.get_transaction()) } @@ -316,9 +315,9 @@ BOOST_AUTO_TEST_SUITE(block_extraction) { ptrx2.id(), expected_action_trace2, - fc::enum_type{bsp1->block->transactions[1].status}, - bsp1->block->transactions[1].cpu_usage_us, - bsp1->block->transactions[1].net_usage_words, + fc::enum_type{bp1->transactions[1].status}, + bp1->transactions[1].cpu_usage_us, + bp1->transactions[1].net_usage_words, ptrx2.get_signatures(), make_trx_header(ptrx2.get_transaction()) } @@ -327,9 +326,9 @@ BOOST_AUTO_TEST_SUITE(block_extraction) { ptrx3.id(), expected_action_trace3, - fc::enum_type{bsp1->block->transactions[2].status}, - bsp1->block->transactions[2].cpu_usage_us, - bsp1->block->transactions[2].net_usage_words, + fc::enum_type{bp1->transactions[2].status}, + bp1->transactions[2].cpu_usage_us, + bp1->transactions[2].net_usage_words, ptrx3.get_signatures(), make_trx_header(ptrx3.get_transaction()) } @@ -337,14 +336,14 @@ BOOST_AUTO_TEST_SUITE(block_extraction) }; const block_trace_v2 expected_block_trace { - bsp1->id, + bp1->calculate_id(), 1, - bsp1->prev(), + bp1->previous, chain::block_timestamp_type(1), "bp.one"_n, - bsp1->block->transaction_mroot, - bsp1->block->action_mroot, - bsp1->block->schedule_version, + bp1->transaction_mroot, + bp1->action_mroot, + bp1->schedule_version, expected_transaction_traces }; @@ -372,9 +371,9 @@ BOOST_AUTO_TEST_SUITE(block_extraction) signal_applied_transaction( onerror_trace, std::make_shared( transfer_trx ) ); - auto bsp1 = make_block_state( chain::block_id_type(), 1, 1, "bp.one"_n, + auto bp1 = make_block( chain::block_id_type(), 1, 1, "bp.one"_n, { chain::packed_transaction(transfer_trx) } ); - signal_accepted_block( bsp1 ); + signal_accepted_block( bp1 ); const std::vector expected_action_trace { { @@ -393,9 +392,9 @@ BOOST_AUTO_TEST_SUITE(block_extraction) { transfer_trx.id(), // transfer_trx.id() because that is the trx id known to the user expected_action_trace, - fc::enum_type{bsp1->block->transactions[0].status}, - bsp1->block->transactions[0].cpu_usage_us, - bsp1->block->transactions[0].net_usage_words, + fc::enum_type{bp1->transactions[0].status}, + bp1->transactions[0].cpu_usage_us, + bp1->transactions[0].net_usage_words, transfer_trx.get_signatures(), 
          make_trx_header(transfer_trx.get_transaction()) }
@@ -403,14 +402,14 @@ BOOST_AUTO_TEST_SUITE(block_extraction)
       };
 
       const block_trace_v2 expected_block_trace {
-         bsp1->id,
+         bp1->calculate_id(),
          1,
-         bsp1->prev(),
+         bp1->previous,
          chain::block_timestamp_type(1),
          "bp.one"_n,
-         bsp1->block->transaction_mroot,
-         bsp1->block->action_mroot,
-         bsp1->block->schedule_version,
+         bp1->transaction_mroot,
+         bp1->action_mroot,
+         bp1->schedule_version,
          expected_transaction_traces
       };
diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp
index 27b3ed3eb9..bae87d2b8d 100644
--- a/programs/cleos/httpc.hpp
+++ b/programs/cleos/httpc.hpp
@@ -34,7 +34,6 @@ namespace eosio { namespace client { namespace http {
    const string get_raw_block_func = chain_func_base + "/get_raw_block";
    const string get_block_header_func = chain_func_base + "/get_block_header";
    const string get_block_info_func = chain_func_base + "/get_block_info";
-   const string get_block_header_state_func = chain_func_base + "/get_block_header_state";
    const string get_account_func = chain_func_base + "/get_account";
    const string get_table_func = chain_func_base + "/get_table_rows";
    const string get_table_by_scope_func = chain_func_base + "/get_table_by_scope";
diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index 819d736426..cc3e0484f2 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -3016,7 +3016,6 @@ int main( int argc, char** argv ) {
    get_block_params params;
    auto getBlock = get->add_subcommand("block", localized("Retrieve a full block from the blockchain"));
    getBlock->add_option("block", params.blockArg, localized("The number or ID of the block to retrieve"))->required();
-   getBlock->add_flag("--header-state", params.get_bhs, localized("Get block header state from fork database instead") );
    getBlock->add_flag("--info", params.get_binfo, localized("Get block info from the blockchain by block num only") );
    getBlock->add_flag("--raw", params.get_braw, localized("Get raw block from the blockchain") );
    getBlock->add_flag("--header", params.get_bheader, localized("Get block header from the blockchain") );
@@ -3024,7 +3023,7 @@ int main( int argc, char** argv ) {
    getBlock->callback([&params] {
       int num_flags = params.get_bhs + params.get_binfo + params.get_braw + params.get_bheader + params.get_bheader_extensions;
-      EOSC_ASSERT( num_flags <= 1, "ERROR: Only one of the following flags can be set: --header-state, --info, --raw, --header, --header-with-extensions." );
+      EOSC_ASSERT( num_flags <= 1, "ERROR: Only one of the following flags can be set: --info, --raw, --header, --header-with-extensions." );
       if (params.get_binfo) {
          std::optional block_num;
          try {
@@ -3037,9 +3036,7 @@ int main( int argc, char** argv ) {
          std::cout << fc::json::to_pretty_string(call(get_block_info_func, arg)) << std::endl;
       } else {
          const auto arg = fc::variant_object("block_num_or_id", params.blockArg);
-         if (params.get_bhs) {
-            std::cout << fc::json::to_pretty_string(call(get_block_header_state_func, arg)) << std::endl;
-         } else if (params.get_braw) {
+         if (params.get_braw) {
             std::cout << fc::json::to_pretty_string(call(get_raw_block_func, arg)) << std::endl;
          } else if (params.get_bheader || params.get_bheader_extensions) {
            std::cout << fc::json::to_pretty_string(
diff --git a/programs/leap-util/actions/blocklog.cpp b/programs/leap-util/actions/blocklog.cpp
index eee37e87df..88a933f81c 100644
--- a/programs/leap-util/actions/blocklog.cpp
+++ b/programs/leap-util/actions/blocklog.cpp
@@ -266,25 +266,26 @@ int blocklog_actions::read_log() {
       opt->first_block = block_logger.first_block_num();
    }
 
-   eosio::chain::branch_type fork_db_branch;
+   using fork_database_t = fork_database_legacy; // [greg todo] what if it is not a legacy fork_db?
+   fork_database_t::branch_type fork_db_branch;
    if(std::filesystem::exists(std::filesystem::path(opt->blocks_dir) / config::reversible_blocks_dir_name / config::forkdb_filename)) {
      ilog("opening fork_db");
-     fork_database fork_db(std::filesystem::path(opt->blocks_dir) / config::reversible_blocks_dir_name);
+     fork_database_t fork_db(std::filesystem::path(opt->blocks_dir) / config::reversible_blocks_dir_name);
 
     fork_db.open([](block_timestamp_type timestamp, const flat_set& cur_features, const vector& new_features) {});
 
-     fork_db_branch = fork_db.fetch_branch(fork_db.head()->id);
+     fork_db_branch = fork_db.fetch_branch(fork_db.head()->id());
      if(fork_db_branch.empty()) {
         elog("no blocks available in reversible block database: only block_log blocks are available");
      } else {
         auto first = fork_db_branch.rbegin();
         auto last = fork_db_branch.rend() - 1;
         ilog("existing reversible fork_db block num ${first} through block num ${last} ",
-             ("first", (*first)->block_num)("last", (*last)->block_num));
-        EOS_ASSERT(end->block_num() + 1 == (*first)->block_num, block_log_exception,
+             ("first", (*first)->block_num())("last", (*last)->block_num()));
+        EOS_ASSERT(end->block_num() + 1 == (*first)->block_num(), block_log_exception,
                    "fork_db does not start at end of block log");
      }
   }
diff --git a/tests/plugin_http_api_test.py b/tests/plugin_http_api_test.py
index c11a5cc21f..f9628847cc 100755
--- a/tests/plugin_http_api_test.py
+++ b/tests/plugin_http_api_test.py
@@ -336,25 +336,6 @@ def test_ChainApi(self) :
         ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["block_num"], 1)
 
-        # get_block_header_state with empty parameter
-        command = "get_block_header_state"
-        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
-        self.assertEqual(ret_json["code"], 400)
-        self.assertEqual(ret_json["error"]["code"], 3200006)
-        # get_block_header_state with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
-        self.assertEqual(ret_json["code"], 400)
-        self.assertEqual(ret_json["error"]["code"], 3200006)
-        # get_block_header_state with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
-        self.assertEqual(ret_json["code"], 400)
-        self.assertEqual(ret_json["error"]["code"], 3200006)
-        # get_block_header_state with valid parameter, the irreversible is not available, unknown block number
-        payload = {"block_num_or_id":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
-        self.assertEqual(ret_json["code"], 400)
-        self.assertEqual(ret_json["error"]["code"], 3100002)
-
         # get_account with empty parameter
         command = "get_account"
         ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
diff --git a/tests/test_chain_plugin.cpp b/tests/test_chain_plugin.cpp
index de9dbcb13e..06e5c46753 100644
--- a/tests/test_chain_plugin.cpp
+++ b/tests/test_chain_plugin.cpp
@@ -345,7 +345,7 @@ class chain_plugin_tester : public validating_tester {
       }
       produce_blocks( 250 );
 
-      auto producer_keys = control->head_block_state()->active_schedule.producers;
+      auto producer_keys = control->active_producers().producers;
       BOOST_CHECK_EQUAL( 21u, producer_keys.size() );
       BOOST_CHECK_EQUAL( name("defproducera"), producer_keys[0].producer_name );
diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index f5c52c6ff1..457558217e 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -3858,6 +3858,9 @@ BOOST_AUTO_TEST_CASE(get_code_hash_tests) { try {
    check("test"_n, 3);
 } FC_LOG_AND_RETHROW() }
 
+#if 0
+// [greg todo] re-implement the test after https://github.com/AntelopeIO/leap/issues/1911 is done
+
 // test set_finalizer host function serialization and tester set_finalizers
 BOOST_AUTO_TEST_CASE(set_finalizer_test) { try {
    validating_tester t;
@@ -3906,4 +3909,7 @@ BOOST_AUTO_TEST_CASE(set_finalizer_test) { try {
 
 } FC_LOG_AND_RETHROW() }
 
+#endif
+
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/unittests/block_header_state_tests.cpp b/unittests/block_header_state_tests.cpp
index 6324fab7d6..c5053919b3 100644
--- a/unittests/block_header_state_tests.cpp
+++ b/unittests/block_header_state_tests.cpp
@@ -1,4 +1,5 @@
 #include
+#include
 
 #include
diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp
index 11a0bce851..9419557ea7 100644
--- a/unittests/block_tests.cpp
+++ b/unittests/block_tests.cpp
@@ -38,8 +38,8 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test)
    copy_b->transaction_mroot = canonical_merkle( std::move(trx_digests) );
 
    // Re-sign the block
-   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) );
-   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) );
+   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state_legacy()->blockroot_merkle.get_root() ) );
+   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
    copy_b->producer_signature = main.get_private_key(config::system_account_name, "active").sign(sig_digest);
 
    // Push block with invalid transaction to other chain
@@ -77,8 +77,8 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_mroot_test)
    copy_b->transactions.back().trx = std::move(invalid_packed_tx);
 
    // Re-sign the block
-   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) );
-   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) );
+   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state_legacy()->blockroot_merkle.get_root() ) );
+   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
    copy_b->producer_signature = main.get_private_key(config::system_account_name, "active").sign(sig_digest);
 
    // Push block with invalid transaction to other chain
@@ -118,8 +118,8 @@ std::pair corrupt_trx_in_block(validating_te
    copy_b->transaction_mroot = canonical_merkle( std::move(trx_digests) );
 
    // Re-sign the block
-   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) );
-   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) );
+   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state_legacy()->blockroot_merkle.get_root() ) );
+   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
    copy_b->producer_signature = main.get_private_key(b->producer, "active").sign(sig_digest);
    return std::pair(b, copy_b);
 }
diff --git a/unittests/bootseq_tests.cpp b/unittests/bootseq_tests.cpp
index e5d2a5a344..eae5f8eee4 100644
--- a/unittests/bootseq_tests.cpp
+++ b/unittests/bootseq_tests.cpp
@@ -272,7 +272,7 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) {
        // No producers will be set, since the total activated stake is less than 150,000,000
        produce_blocks_for_n_rounds(2); // 2 rounds since new producer schedule is set when the first block of next round is irreversible
 
-       auto active_schedule = control->head_block_state()->active_schedule;
+       auto active_schedule = control->active_producers();
        BOOST_TEST(active_schedule.producers.size() == 1u);
        BOOST_TEST(active_schedule.producers.front().producer_name == name("eosio"));
 
@@ -287,7 +287,7 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) {
        // Since the total vote stake is more than 150,000,000, the new producer set will be set
        produce_blocks_for_n_rounds(2); // 2 rounds since new producer schedule is set when the first block of next round is irreversible
 
-       active_schedule = control->head_block_state()->active_schedule;
+       active_schedule = control->active_producers();
        BOOST_REQUIRE(active_schedule.producers.size() == 21);
        BOOST_TEST(active_schedule.producers.at( 0).producer_name == name("proda"));
        BOOST_TEST(active_schedule.producers.at( 1).producer_name == name("prodb"));
diff --git a/unittests/chain_tests.cpp b/unittests/chain_tests.cpp
index 05fb688e3a..40e49e5a04 100644
--- a/unittests/chain_tests.cpp
+++ b/unittests/chain_tests.cpp
@@ -18,23 +18,24 @@ BOOST_AUTO_TEST_SUITE(chain_tests)
 
 BOOST_AUTO_TEST_CASE( replace_producer_keys ) try {
    validating_tester tester;
-   const auto head_ptr = tester.control->head_block_state();
-   BOOST_REQUIRE(head_ptr);
-
    const auto new_key = get_public_key(name("newkey"), config::active_name.to_string());
 
    // make sure new keys is not used
-   for(const auto& prod : head_ptr->active_schedule.producers) {
+   for(const auto& prod : tester.control->active_producers().producers) {
      for(const auto& key : std::get(prod.authority).keys){
         BOOST_REQUIRE(key.key != new_key);
      }
   }
 
-   const auto old_version = head_ptr->pending_schedule.schedule.version;
+   const auto old_pending_version = tester.control->pending_producers().version;
+   const auto old_version = tester.control->active_producers().version;
 
    BOOST_REQUIRE_NO_THROW(tester.control->replace_producer_keys(new_key));
-   const auto new_version = head_ptr->pending_schedule.schedule.version;
+   const auto new_version = tester.control->active_producers().version;
+   const auto pending_version = tester.control->pending_producers().version;
    // make sure version not been changed
    BOOST_REQUIRE(old_version == new_version);
+   BOOST_REQUIRE(old_version == pending_version);
+   BOOST_REQUIRE(pending_version == old_pending_version);
 
    const auto& gpo = tester.control->db().get();
    BOOST_REQUIRE(!gpo.proposed_schedule_block_num);
@@ -43,7 +44,7 @@ BOOST_AUTO_TEST_CASE( replace_producer_keys ) try {
    const uint32_t expected_threshold = 1;
    const weight_type expected_key_weight = 1;
-   for(const auto& prod : head_ptr->active_schedule.producers) {
+   for(const auto& prod : tester.control->pending_producers().producers) {
      BOOST_REQUIRE_EQUAL(std::get(prod.authority).threshold, expected_threshold);
      for(const auto& key : std::get(prod.authority).keys){
         BOOST_REQUIRE_EQUAL(key.key, new_key);
@@ -155,10 +156,6 @@ BOOST_AUTO_TEST_CASE( signal_validated_blocks ) try {
       const auto& [ block, id ] = t;
       auto block_num = block->block_num();
       BOOST_CHECK(block);
-      const auto& bsp_by_id = chain.control->fetch_block_state_by_id(id);
-      BOOST_CHECK(bsp_by_id->block_num == block_num);
-      const auto& bsp_by_number = chain.control->fetch_block_state_by_number(block_num); // verify it can be found (has to be validated)
-      BOOST_CHECK(bsp_by_number->id == id);
       BOOST_CHECK(chain.control->fetch_block_by_id(id) == block);
       BOOST_CHECK(chain.control->fetch_block_by_number(block_num) == block);
       BOOST_REQUIRE(chain.control->fetch_block_header_by_number(block_num));
@@ -174,10 +171,6 @@ BOOST_AUTO_TEST_CASE( signal_validated_blocks ) try {
       const auto& [ block, id ] = t;
       auto block_num = block->block_num();
       BOOST_CHECK(block);
-      const auto& bsp_by_id = validator.control->fetch_block_state_by_id(id);
-      BOOST_CHECK(bsp_by_id->block_num == block_num);
-      const auto& bsp_by_number = validator.control->fetch_block_state_by_number(block_num); // verify it can be found (has to be validated)
-      BOOST_CHECK(bsp_by_number->id == id);
       BOOST_CHECK(validator.control->fetch_block_by_id(id) == block);
       BOOST_CHECK(validator.control->fetch_block_by_number(block_num) == block);
       BOOST_REQUIRE(validator.control->fetch_block_header_by_number(block_num));
diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp
index 1ddd56e64e..1f0ba01ca7 100644
--- a/unittests/database_tests.cpp
+++ b/unittests/database_tests.cpp
@@ -55,7 +55,7 @@ BOOST_AUTO_TEST_SUITE(database_tests)
 
       // Check the last irreversible block number is set correctly, with one producer, irreversibility should only just 1 block before
       const auto expected_last_irreversible_block_number = test.control->head_block_num() - 1;
-      BOOST_TEST(test.control->head_block_state()->dpos_irreversible_blocknum == expected_last_irreversible_block_number);
+      BOOST_TEST(test.control->head_block_state_legacy()->dpos_irreversible_blocknum == expected_last_irreversible_block_number);
       // Ensure that future block doesn't exist
       const auto nonexisting_future_block_num = test.control->head_block_num() + 1;
       BOOST_TEST(test.control->fetch_block_by_number(nonexisting_future_block_num) == nullptr);
@@ -65,7 +65,7 @@ BOOST_AUTO_TEST_SUITE(database_tests)
 
       const auto next_expected_last_irreversible_block_number = test.control->head_block_num() - 1;
       // Check the last irreversible block number is updated correctly
-      BOOST_TEST(test.control->head_block_state()->dpos_irreversible_blocknum == next_expected_last_irreversible_block_number);
+      BOOST_TEST(test.control->head_block_state_legacy()->dpos_irreversible_blocknum == next_expected_last_irreversible_block_number);
       // Previous nonexisting future block should exist by now
       BOOST_CHECK_NO_THROW(test.control->fetch_block_by_number(nonexisting_future_block_num));
       // Check the latest head block match
diff --git a/unittests/eosio_system_tester.hpp b/unittests/eosio_system_tester.hpp
index a477d6d41b..d793e938c4 100644
--- a/unittests/eosio_system_tester.hpp
+++ b/unittests/eosio_system_tester.hpp
@@ -473,7 +473,7 @@ class eosio_system_tester : public validating_tester {
       }
       produce_blocks( 250 );
 
-      auto producer_keys = control->head_block_state()->active_schedule.producers;
+      auto producer_keys = control->active_producers().producers;
       BOOST_REQUIRE_EQUAL( 21u, producer_keys.size() );
       BOOST_REQUIRE_EQUAL( name("defproducera"), producer_keys[0].producer_name );
diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp
index 49b5319317..dc135a509c 100644
--- a/unittests/forked_tests.cpp
+++ b/unittests/forked_tests.cpp
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try {
    // produce 6 blocks on bios
    for (int i = 0; i < 6; i ++) {
       bios.produce_block();
-      BOOST_REQUIRE_EQUAL( bios.control->head_block_state()->header.producer.to_string(), "a" );
+      BOOST_REQUIRE_EQUAL( bios.control->head_block()->producer.to_string(), "a" );
    }
 
    vector forks(7);
@@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try {
          auto copy_b = std::make_shared(b->clone());
          if (j == i) {
             // corrupt this block
-            fork.block_merkle = remote.control->head_block_state()->blockroot_merkle;
+            fork.block_merkle = remote.control->head_block_state_legacy()->blockroot_merkle;
             copy_b->action_mroot._hash[0] ^= 0x1ULL;
          } else if (j < i) {
             // link to a corrupted chain
@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try {
 
          // re-sign the block
          auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), fork.block_merkle.get_root() ) );
-         auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) );
+         auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
          copy_b->producer_signature = remote.get_private_key("b"_n, "active").sign(sig_digest);
 
          // add this new block to our corrupted block merkle
@@ -117,9 +117,9 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try {
    }
 
    // make sure we can still produce a blocks until irreversibility moves
-   auto lib = bios.control->head_block_state()->dpos_irreversible_blocknum;
+   auto lib = bios.control->head_block_state_legacy()->dpos_irreversible_blocknum;
    size_t tries = 0;
-   while (bios.control->head_block_state()->dpos_irreversible_blocknum == lib && ++tries < 10000) {
+   while (bios.control->head_block_state_legacy()->dpos_irreversible_blocknum == lib && ++tries < 10000) {
       bios.produce_block();
    }
 
@@ -303,7 +303,7 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try {
    auto nextproducer = [](tester &c, int skip_interval) ->account_name {
       auto head_time = c.control->head_block_time();
       auto next_time = head_time + fc::milliseconds(config::block_interval_ms * skip_interval);
-      return c.control->head_block_state()->get_scheduled_producer(next_time).producer_name;
+      return c.control->active_producers().get_scheduled_producer(next_time).producer_name;
   };
 
   // fork c: 2 producers: dan, sam
@@ -367,10 +367,9 @@ BOOST_AUTO_TEST_CASE( validator_accepts_valid_blocks ) try {
 
    BOOST_CHECK_EQUAL( n2.control->head_block_id(), id );
 
    BOOST_REQUIRE( first_block );
-   const auto& first_bsp = n2.control->fetch_block_state_by_id(first_id);
-   first_bsp->verify_signee();
-   BOOST_CHECK_EQUAL( first_header.calculate_id(), first_block->calculate_id() );
-   BOOST_CHECK( first_header.producer_signature == first_block->producer_signature );
+   const auto& first_bp = n2.control->fetch_block_by_id(first_id);
+   BOOST_CHECK_EQUAL( first_bp->calculate_id(), first_block->calculate_id() );
+   BOOST_CHECK( first_bp->producer_signature == first_block->producer_signature );
 
    c.disconnect();
 
@@ -495,8 +494,8 @@ BOOST_AUTO_TEST_CASE( irreversible_mode ) try {
    BOOST_CHECK_EQUAL( does_account_exist( irreversible, "alice"_n ), true );
 
    {
-      auto bs = irreversible.control->fetch_block_state_by_id( fork_first_block_id );
-      BOOST_REQUIRE( bs && bs->id == fork_first_block_id );
+      auto b = irreversible.control->fetch_block_by_id( fork_first_block_id );
+      BOOST_REQUIRE( b && b->calculate_id() == fork_first_block_id );
   }
 
   main.produce_block();
@@ -508,8 +507,8 @@ BOOST_AUTO_TEST_CASE( irreversible_mode ) try {
   push_blocks( main, irreversible, hbn5 );
 
   {
-      auto bs = irreversible.control->fetch_block_state_by_id( fork_first_block_id );
-      BOOST_REQUIRE( !bs );
+      auto b = irreversible.control->fetch_block_by_id( fork_first_block_id );
+      BOOST_REQUIRE( !b );
   }
 } FC_LOG_AND_RETHROW()
diff --git a/unittests/producer_schedule_hs_tests.cpp b/unittests/producer_schedule_hs_tests.cpp
index 0c0e0c8494..6e18caa418 100644
--- a/unittests/producer_schedule_hs_tests.cpp
+++ b/unittests/producer_schedule_hs_tests.cpp
@@ -20,6 +20,9 @@ inline account_name get_expected_producer(const vector& sche
    };
 
 } // anonymous namespace
 
+#if 0
+
+// [greg todo] Enable test when https://github.com/AntelopeIO/leap/issues/1980 is completed
 BOOST_FIXTURE_TEST_CASE( verify_producer_schedule_after_hotstuff_activation, validating_tester ) try {
@@ -108,6 +111,8 @@ BOOST_FIXTURE_TEST_CASE( verify_producer_schedule_after_hotstuff_activation, val
 
 } FC_LOG_AND_RETHROW()
 
+#endif
+
 /** TODO: Enable tests after hotstuff LIB is working
 
 BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, validating_tester ) try {
diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp
index eea947804b..dbfd9531ce 100644
--- a/unittests/producer_schedule_tests.cpp
+++ b/unittests/producer_schedule_tests.cpp
@@ -28,13 +28,14 @@ BOOST_FIXTURE_TEST_CASE( verify_producer_schedule, validating_tester ) try {
    const uint32_t check_duration = 1000; // number of blocks
    bool scheduled_changed_to_new = false;
    for (uint32_t i = 0; i < check_duration; ++i) {
-      const auto current_schedule = control->head_block_state()->active_schedule.producers;
+      const auto current_schedule = control->active_producers().producers;
       if (new_prod_schd == current_schedule) {
          scheduled_changed_to_new = true;
       }
 
       // Produce block
       produce_block();
+      control->abort_block(); // abort started block in produce_block so activate_producers() is off head
 
       // Check if the producer is the same as what we expect
       const auto block_time = control->head_block_time();
@@ -401,8 +402,8 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try {
    wdump((alice_last_produced_block_num));
    {
-      wdump((c.control->head_block_state()->producer_to_last_produced));
-      const auto& last_produced = c.control->head_block_state()->producer_to_last_produced;
+      wdump((c.control->head_block_state_legacy()->producer_to_last_produced));
+      const auto& last_produced = c.control->head_block_state_legacy()->producer_to_last_produced;
      auto alice_itr = last_produced.find( "alice"_n );
      BOOST_REQUIRE( alice_itr != last_produced.end() );
      BOOST_CHECK_EQUAL( alice_itr->second, alice_last_produced_block_num );
@@ -638,8 +639,8 @@ BOOST_AUTO_TEST_CASE( extra_signatures_test ) try {
   BOOST_REQUIRE_EQUAL( additional_sigs.size(), 1u );
 
   // Generate the extra signature and add to additonal_sigs.
-  auto header_bmroot = digest_type::hash( std::make_pair( b->digest(), remote.control->head_block_state()->blockroot_merkle.get_root() ) );
-  auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) );
+  auto header_bmroot = digest_type::hash( std::make_pair( b->digest(), remote.control->head_block_state_legacy()->blockroot_merkle.get_root() ) );
+  auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
   additional_sigs.emplace_back( remote.get_private_key("alice"_n, "bs3").sign(sig_digest) );
   additional_sigs.emplace_back( remote.get_private_key("alice"_n, "bs4").sign(sig_digest) );
diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 01aa4da2d0..09df3ebdcf 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -1593,19 +1593,17 @@ BOOST_AUTO_TEST_CASE( producer_schedule_change_extension_test ) { try {
 
    {
       // ensure producer_schedule_change_extension is rejected
-      const auto& hbs = remote.control->head_block_state();
-
       // create a bad block that has the producer schedule change extension before the feature upgrade
       auto bad_block = std::make_shared(last_legacy_block->clone());
       emplace_extension(
             bad_block->header_extensions,
             producer_schedule_change_extension::extension_id(),
-            fc::raw::pack(std::make_pair(hbs->active_schedule.version + 1, std::vector{}))
+            fc::raw::pack(std::make_pair(remote.control->active_producers().version + 1, std::vector{}))
       );
 
       // re-sign the bad block
-      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) );
-      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) );
+      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state_legacy()->blockroot_merkle ) );
+      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
       bad_block->producer_signature = remote.get_private_key("eosio"_n, "active").sign(sig_digest);
 
       // ensure it is rejected as an unknown extension
@@ -1616,15 +1614,13 @@ BOOST_AUTO_TEST_CASE( producer_schedule_change_extension_test ) { try {
    }
 
    {
      // ensure that non-null new_producers is accepted (and fails later in validation)
-      const auto& hbs = remote.control->head_block_state();
-
      // create a bad block that has the producer schedule change extension before the feature upgrade
      auto bad_block = std::make_shared(last_legacy_block->clone());
-      bad_block->new_producers = legacy::producer_schedule_type{hbs->active_schedule.version + 1, {}};
+      bad_block->new_producers = legacy::producer_schedule_type{remote.control->active_producers().version + 1, {}};
 
      // re-sign the bad block
-      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) );
-      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) );
+      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state_legacy()->blockroot_merkle ) );
+      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
      bad_block->producer_signature = remote.get_private_key("eosio"_n, "active").sign(sig_digest);
 
      // ensure it is accepted (but rejected because it doesn't match expected state)
@@ -1640,19 +1636,17 @@ BOOST_AUTO_TEST_CASE( producer_schedule_change_extension_test ) { try {
    auto first_new_block = c.produce_block();
 
    {
-      const auto& hbs = remote.control->head_block_state();
-
       // create a bad block that has the producer schedule change extension that is valid but not warranted by actions in the block
       auto bad_block = std::make_shared(first_new_block->clone());
       emplace_extension(
             bad_block->header_extensions,
             producer_schedule_change_extension::extension_id(),
-            fc::raw::pack(std::make_pair(hbs->active_schedule.version + 1, std::vector{}))
+            fc::raw::pack(std::make_pair(remote.control->active_producers().version + 1, std::vector{}))
       );
 
       // re-sign the bad block
-      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) );
-      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) );
+      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state_legacy()->blockroot_merkle ) );
+      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
       bad_block->producer_signature = remote.get_private_key("eosio"_n, "active").sign(sig_digest);
 
       // ensure it is rejected because it doesn't match expected state (but the extention was accepted)
@@ -1663,15 +1657,13 @@ BOOST_AUTO_TEST_CASE( producer_schedule_change_extension_test ) { try {
    }
 
    {
      // ensure that non-null new_producers is rejected
-      const auto& hbs = remote.control->head_block_state();
-
      // create a bad block that has the producer schedule change extension before the feature upgrade
      auto bad_block = std::make_shared(first_new_block->clone());
-      bad_block->new_producers = legacy::producer_schedule_type{hbs->active_schedule.version + 1, {}};
+      bad_block->new_producers = legacy::producer_schedule_type{remote.control->active_producers().version + 1, {}};
 
      // re-sign the bad block
-      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) );
-      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) );
+      auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state_legacy()->blockroot_merkle ) );
+      auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
      bad_block->producer_signature = remote.get_private_key("eosio"_n, "active").sign(sig_digest);
 
      // ensure it is rejected because the new_producers field is not null
@@ -2246,8 +2238,8 @@ BOOST_AUTO_TEST_CASE( block_validation_after_stage_1_test ) { try {
    copy_b->transaction_mroot = canonical_merkle( std::move(trx_digests) );
 
    // Re-sign the block
-   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), tester1.control->head_block_state()->blockroot_merkle.get_root() ) );
-   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, tester1.control->head_block_state()->pending_schedule.schedule_hash) );
+   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), tester1.control->head_block_state_legacy()->blockroot_merkle.get_root() ) );
+   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, tester1.control->head_block_state_legacy()->pending_schedule.schedule_hash) );
    copy_b->producer_signature = tester1.get_private_key(config::system_account_name, "active").sign(sig_digest);
 
    // Create the second chain
diff --git a/unittests/special_accounts_tests.cpp b/unittests/special_accounts_tests.cpp
index bd3965ddc8..133e39ccef 100644
--- a/unittests/special_accounts_tests.cpp
+++ b/unittests/special_accounts_tests.cpp
@@ -1,19 +1,10 @@
-#include
-#include
-#include
-
 #include
 #include
 #include
 #include
 #include
-#include
-
-#include
-#include
-#include
-#include
+#include
 
 using namespace eosio;
 using namespace chain;
@@ -44,7 +35,7 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester)
    auto producers = chain1_db.find(config::producers_account_name);
    BOOST_CHECK(producers != nullptr);
 
-   const auto& active_producers = control->head_block_state()->active_schedule;
+   const auto& active_producers = control->active_producers();
 
    const auto& producers_active_authority = chain1_db.get(boost::make_tuple(config::producers_account_name, config::active_name));
    auto expected_threshold = (active_producers.producers.size() * 2)/3 + 1;
diff --git a/unittests/state_history_tests.cpp b/unittests/state_history_tests.cpp
index 2114bf7647..2c74310248 100644
--- a/unittests/state_history_tests.cpp
+++ b/unittests/state_history_tests.cpp
@@ -634,8 +634,8 @@ struct state_history_tester : state_history_tester_logs, tester {
       control.accepted_block.connect([&](block_signal_params t) {
          const auto& [ block, id ] = t;
          eosio::state_history_log_header header{.magic = eosio::ship_magic(eosio::ship_current_version, 0),
-                                                .block_id = id,
-                                                .payload_size = 0};
+                                                .block_id = id,
+                                                .payload_size = 0};
         traces_log.pack_and_write_entry(header, block->previous, [this, &block](auto&& buf) {
            trace_converter.pack(buf, false, block);