forked from onflow/flow-go
-
Notifications
You must be signed in to change notification settings - Fork 0
/
builder_test.go
1463 lines (1285 loc) · 61.5 KB
/
builder_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
package consensus
import (
"math/rand"
"os"
"testing"
"github.com/dgraph-io/badger/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/onflow/flow-go/model/flow"
mempoolAPIs "github.com/onflow/flow-go/module/mempool"
mempoolImpl "github.com/onflow/flow-go/module/mempool/consensus"
mempool "github.com/onflow/flow-go/module/mempool/mock"
"github.com/onflow/flow-go/module/metrics"
"github.com/onflow/flow-go/module/trace"
realproto "github.com/onflow/flow-go/state/protocol"
protocol "github.com/onflow/flow-go/state/protocol/mock"
storerr "github.com/onflow/flow-go/storage"
"github.com/onflow/flow-go/storage/badger/operation"
storage "github.com/onflow/flow-go/storage/mock"
"github.com/onflow/flow-go/utils/unittest"
)
// TestConsensusBuilder is the entry point that runs the BuilderSuite
// via testify's suite runner.
func TestConsensusBuilder(t *testing.T) {
	suite.Run(t, new(BuilderSuite))
}
// BuilderSuite bundles all fixtures shared by the Builder tests: an in-memory
// model of a block chain (headers, payload indices, results, receipts, seals),
// mocked storage and mempool layers backed by that model, a temporary badger
// database, and the Builder instance under test.
type BuilderSuite struct {
	suite.Suite
	// test helpers
	firstID           flow.Identifier                            // first block in the range we look at
	finalID           flow.Identifier                            // last finalized block
	parentID          flow.Identifier                            // Parent block we build on
	finalizedBlockIDs []flow.Identifier                          // blocks between first and final
	pendingBlockIDs   []flow.Identifier                          // blocks between final and parent
	resultForBlock    map[flow.Identifier]*flow.ExecutionResult  // map: BlockID -> Execution Result
	resultByID        map[flow.Identifier]*flow.ExecutionResult  // map: result ID -> Execution Result
	receiptsByID      map[flow.Identifier]*flow.ExecutionReceipt // map: receipt ID -> ExecutionReceipt
	receiptsByBlockID map[flow.Identifier]flow.ExecutionReceiptList // map: block ID -> flow.ExecutionReceiptList
	// used to populate and test the seal mempool
	chain   []*flow.Seal                                    // chain of seals starting first
	irsList []*flow.IncorporatedResultSeal                  // chain of IncorporatedResultSeals
	irsMap  map[flow.Identifier]*flow.IncorporatedResultSeal // index for irsList
	// mempools consumed by builder
	pendingGuarantees []*flow.CollectionGuarantee
	pendingReceipts   []*flow.ExecutionReceipt
	pendingSeals      map[flow.Identifier]*flow.IncorporatedResultSeal // storage for the seal mempool
	// storage for dbs (in-memory stand-ins the storage mocks read from)
	headers       map[flow.Identifier]*flow.Header
	index         map[flow.Identifier]*flow.Index
	blocks        map[flow.Identifier]*flow.Block
	blockChildren map[flow.Identifier][]flow.Identifier // ids of children blocks
	lastSeal      *flow.Seal
	// real dependencies
	dir      string
	db       *badger.DB
	sentinel uint64                    // value the setter writes into the header view; checked by the state mock
	setter   func(*flow.Header) error
	// mocked dependencies
	state      *protocol.ParticipantState
	headerDB   *storage.Headers
	sealDB     *storage.Seals
	indexDB    *storage.Index
	blockDB    *storage.Blocks
	resultDB   *storage.ExecutionResults
	receiptsDB *storage.ExecutionReceipts
	guarPool   *mempool.Guarantees
	sealPool   *mempool.IncorporatedResultSeals
	recPool    *mempool.ExecutionTree
	// tracking behaviour
	assembled *flow.Payload // built payload
	// component under test
	build *Builder
}
// storeBlock registers the given block in every in-memory index backing the
// mocked storage layer: headers, blocks, the payload index, the
// parent-to-children map, and the result-by-ID lookup for each execution
// result contained in the payload.
func (bs *BuilderSuite) storeBlock(block *flow.Block) {
	blockID := block.ID()
	parentID := block.Header.ParentID
	bs.headers[blockID] = block.Header
	bs.blocks[blockID] = block
	bs.index[blockID] = block.Payload.Index()
	bs.blockChildren[parentID] = append(bs.blockChildren[parentID], blockID)
	for _, res := range block.Payload.Results {
		bs.resultByID[res.ID()] = res
	}
}
// createAndRecordBlock creates a new block chained to parentBlock and records
// it in the suite's fixtures. The new block carries a receipt for a result of
// parentBlock; that result is also wrapped in an IncorporatedResult which, if
// candidateSealForParent is true, is turned into a candidate seal for the
// seals mempool via chainSeal.
func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block, candidateSealForParent bool) *flow.Block {
	block := unittest.BlockWithParentFixture(parentBlock.Header)

	// Look up the parent's execution result; every recorded block must have one.
	previousResult, found := bs.resultForBlock[parentBlock.ID()]
	if !found {
		panic("missing execution result for parent")
	}

	// Attach a receipt for the parent's result to the new block's payload.
	receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(previousResult))
	block.Payload.Receipts = append(block.Payload.Receipts, receipt.Meta())
	block.Payload.Results = append(block.Payload.Results, &receipt.ExecutionResult)

	// The IncorporatedResult ties the parent's result to the block that
	// incorporates it (this new block); it backs the candidate seal below.
	incorporatedResultForPrevBlock := unittest.IncorporatedResult.Fixture(
		unittest.IncorporatedResult.WithResult(previousResult),
		unittest.IncorporatedResult.WithIncorporatedBlockID(block.ID()),
	)

	// Create this block's own execution result, chained to the parent's.
	result := unittest.ExecutionResultFixture(
		unittest.WithBlock(block),
		unittest.WithPreviousResult(*previousResult),
	)
	bs.resultForBlock[result.BlockID] = result
	bs.resultByID[result.ID()] = result
	bs.receiptsByID[receipt.ID()] = receipt
	bs.receiptsByBlockID[receipt.ExecutionResult.BlockID] = append(bs.receiptsByBlockID[receipt.ExecutionResult.BlockID], receipt)

	// record block in the in-memory dbs
	bs.storeBlock(block)

	if candidateSealForParent {
		// seal the parent block with the result included in this block
		bs.chainSeal(incorporatedResultForPrevBlock)
	}
	return block
}
// chainSeal creates a seal for the given incorporated result's block and
// records it in all seal-tracking fixtures: the seal chain, the irsMap index,
// and the ordered irsList. The IncorporatedResultSeal ties the seal to the
// incorporated result it seals so tests can later feed it to the mempool.
func (bs *BuilderSuite) chainSeal(incorporatedResult *flow.IncorporatedResult) {
	irSeal := unittest.IncorporatedResultSeal.Fixture(
		unittest.IncorporatedResultSeal.WithResult(incorporatedResult.Result),
		unittest.IncorporatedResultSeal.WithIncorporatedBlockID(incorporatedResult.IncorporatedBlockID),
	)
	bs.chain = append(bs.chain, irSeal.Seal)
	bs.irsMap[irSeal.ID()] = irSeal
	bs.irsList = append(bs.irsList, irSeal)
}
// SetupTest constructs the following chain of blocks:
//
//	[first] <- [F0] <- [F1] <- [F2] <- [F3] <- [final] <- [A0] <- [A1] <- [A2] <- [A3] <- [parent]
//
// Where block
//   - [first] is sealed and finalized
//   - [F0] ... [F3] and [final] are finalized, unsealed blocks with candidate seals are included in mempool
//   - [A0] ... [A2] are non-finalized, unsealed blocks with candidate seals are included in mempool
//   - [A3] and [parent] are non-finalized, unsealed blocks _without_ candidate seals
//
// Each block incorporates the result for its immediate parent.
//
// Note: In the happy path, the blocks [A3] and [parent] will not have candidate seal for the following reason:
// For the verifiers to start checking a result R, they need a source of randomness for the block _incorporating_
// result R. The result for block [A3] is incorporated in [parent], which does _not_ have a child yet.
func (bs *BuilderSuite) SetupTest() {
	// set up no-op dependencies
	noopMetrics := metrics.NewNoopCollector()
	noopTracer := trace.NewNoopTracer()
	// set up test parameters
	numFinalizedBlocks := 4
	numPendingBlocks := 4
	// reset test helpers
	bs.pendingBlockIDs = nil
	bs.finalizedBlockIDs = nil
	bs.resultForBlock = make(map[flow.Identifier]*flow.ExecutionResult)
	bs.resultByID = make(map[flow.Identifier]*flow.ExecutionResult)
	bs.receiptsByID = make(map[flow.Identifier]*flow.ExecutionReceipt)
	bs.receiptsByBlockID = make(map[flow.Identifier]flow.ExecutionReceiptList)
	bs.chain = nil
	bs.irsMap = make(map[flow.Identifier]*flow.IncorporatedResultSeal)
	bs.irsList = nil
	// initialize the pools
	bs.pendingGuarantees = nil
	bs.pendingSeals = nil
	bs.pendingReceipts = nil
	// initialise the dbs
	bs.lastSeal = nil
	bs.headers = make(map[flow.Identifier]*flow.Header)
	//bs.heights = make(map[uint64]*flow.Header)
	bs.index = make(map[flow.Identifier]*flow.Index)
	bs.blocks = make(map[flow.Identifier]*flow.Block)
	bs.blockChildren = make(map[flow.Identifier][]flow.Identifier)
	// initialize behaviour tracking
	bs.assembled = nil
	// Construct the [first] block; it is the only sealed block (bs.lastSeal seals it)
	first := unittest.BlockFixture()
	bs.storeBlock(&first)
	bs.firstID = first.ID()
	firstResult := unittest.ExecutionResultFixture(unittest.WithBlock(&first))
	bs.lastSeal = unittest.Seal.Fixture(unittest.Seal.WithResult(firstResult))
	bs.resultForBlock[firstResult.BlockID] = firstResult
	bs.resultByID[firstResult.ID()] = firstResult
	// Construct finalized blocks [F0] ... [F3]
	previous := &first
	for n := 0; n < numFinalizedBlocks; n++ {
		finalized := bs.createAndRecordBlock(previous, n > 0) // Do not construct candidate seal for [first], as it is already sealed
		bs.finalizedBlockIDs = append(bs.finalizedBlockIDs, finalized.ID())
		previous = finalized
	}
	// Construct the last finalized block [final]
	final := bs.createAndRecordBlock(previous, true)
	bs.finalID = final.ID()
	// Construct the pending (i.e. unfinalized) ancestors [A0], ..., [A3]
	previous = final
	for n := 0; n < numPendingBlocks; n++ {
		pending := bs.createAndRecordBlock(previous, true)
		bs.pendingBlockIDs = append(bs.pendingBlockIDs, pending.ID())
		previous = pending
	}
	// Construct [parent] block; but do _not_ add candidate seal for its parent
	parent := bs.createAndRecordBlock(previous, false)
	bs.parentID = parent.ID()
	// set up temporary database for tests; the builder reads finalized/sealed
	// heights and the root height directly from badger
	bs.db, bs.dir = unittest.TempBadgerDB(bs.T())
	err := bs.db.Update(operation.InsertFinalizedHeight(final.Header.Height))
	bs.Require().NoError(err)
	err = bs.db.Update(operation.IndexBlockHeight(final.Header.Height, bs.finalID))
	bs.Require().NoError(err)
	// root height 13 — presumably an arbitrary value below the test chain; only
	// its presence matters for these tests
	err = bs.db.Update(operation.InsertRootHeight(13))
	bs.Require().NoError(err)
	err = bs.db.Update(operation.InsertSealedHeight(first.Header.Height))
	bs.Require().NoError(err)
	err = bs.db.Update(operation.IndexBlockHeight(first.Header.Height, first.ID()))
	bs.Require().NoError(err)
	// the setter stamps the sentinel view into candidate headers; the state
	// mock below asserts the stamp survived until Extend
	bs.sentinel = 1337
	bs.setter = func(header *flow.Header) error {
		header.View = 1337
		return nil
	}
	// the state mock captures the payload passed to Extend into bs.assembled
	bs.state = &protocol.ParticipantState{}
	bs.state.On("Extend", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
		block := args.Get(1).(*flow.Block)
		bs.Assert().Equal(bs.sentinel, block.Header.View)
		bs.assembled = block.Payload
	}).Return(nil)
	bs.state.On("Final").Return(func() realproto.Snapshot {
		if block, ok := bs.blocks[bs.finalID]; ok {
			snapshot := unittest.StateSnapshotForKnownBlock(block.Header, nil)
			snapshot.On("Descendants").Return(bs.blockChildren[bs.finalID], nil)
			return snapshot
		}
		return unittest.StateSnapshotForUnknownBlock()
	})
	// set up storage mocks for tests; each reads from the suite's in-memory maps
	// and reports storerr.ErrNotFound for unknown IDs
	bs.sealDB = &storage.Seals{}
	bs.sealDB.On("HighestInFork", mock.Anything).Return(bs.lastSeal, nil)
	bs.headerDB = &storage.Headers{}
	bs.headerDB.On("ByBlockID", mock.Anything).Return(
		func(blockID flow.Identifier) *flow.Header {
			return bs.headers[blockID]
		},
		func(blockID flow.Identifier) error {
			_, exists := bs.headers[blockID]
			if !exists {
				return storerr.ErrNotFound
			}
			return nil
		},
	)
	bs.indexDB = &storage.Index{}
	bs.indexDB.On("ByBlockID", mock.Anything).Return(
		func(blockID flow.Identifier) *flow.Index {
			return bs.index[blockID]
		},
		func(blockID flow.Identifier) error {
			_, exists := bs.index[blockID]
			if !exists {
				return storerr.ErrNotFound
			}
			return nil
		},
	)
	bs.blockDB = &storage.Blocks{}
	bs.blockDB.On("ByID", mock.Anything).Return(
		func(blockID flow.Identifier) *flow.Block {
			return bs.blocks[blockID]
		},
		func(blockID flow.Identifier) error {
			_, exists := bs.blocks[blockID]
			if !exists {
				return storerr.ErrNotFound
			}
			return nil
		},
	)
	bs.resultDB = &storage.ExecutionResults{}
	bs.resultDB.On("ByID", mock.Anything).Return(
		func(resultID flow.Identifier) *flow.ExecutionResult {
			return bs.resultByID[resultID]
		},
		func(resultID flow.Identifier) error {
			_, exists := bs.resultByID[resultID]
			if !exists {
				return storerr.ErrNotFound
			}
			return nil
		},
	)
	bs.receiptsDB = &storage.ExecutionReceipts{}
	bs.receiptsDB.On("ByID", mock.Anything).Return(
		func(receiptID flow.Identifier) *flow.ExecutionReceipt {
			return bs.receiptsByID[receiptID]
		},
		func(receiptID flow.Identifier) error {
			_, exists := bs.receiptsByID[receiptID]
			if !exists {
				return storerr.ErrNotFound
			}
			return nil
		},
	)
	bs.receiptsDB.On("ByBlockID", mock.Anything).Return(
		func(blockID flow.Identifier) flow.ExecutionReceiptList {
			return bs.receiptsByBlockID[blockID]
		},
		func(blockID flow.Identifier) error {
			_, exists := bs.receiptsByBlockID[blockID]
			if !exists {
				return storerr.ErrNotFound
			}
			return nil
		},
	)
	// set up memory pool mocks for tests; they serve the bs.pending* fixtures
	bs.guarPool = &mempool.Guarantees{}
	bs.guarPool.On("Size").Return(uint(0)) // only used by metrics
	bs.guarPool.On("All").Return(
		func() []*flow.CollectionGuarantee {
			return bs.pendingGuarantees
		},
	)
	bs.sealPool = &mempool.IncorporatedResultSeals{}
	bs.sealPool.On("Size").Return(uint(0)) // only used by metrics
	bs.sealPool.On("All").Return(
		func() []*flow.IncorporatedResultSeal {
			res := make([]*flow.IncorporatedResultSeal, 0, len(bs.pendingSeals))
			for _, ps := range bs.pendingSeals {
				res = append(res, ps)
			}
			return res
		},
	)
	bs.sealPool.On("ByID", mock.Anything).Return(
		func(id flow.Identifier) *flow.IncorporatedResultSeal {
			return bs.pendingSeals[id]
		},
		func(id flow.Identifier) bool {
			_, exists := bs.pendingSeals[id]
			return exists
		},
	)
	bs.recPool = &mempool.ExecutionTree{}
	bs.recPool.On("PruneUpToHeight", mock.Anything).Return(nil).Maybe()
	bs.recPool.On("Size").Return(uint(0)).Maybe() // used for metrics only
	bs.recPool.On("AddResult", mock.Anything, mock.Anything).Return(nil).Maybe()
	bs.recPool.On("AddReceipt", mock.Anything, mock.Anything).Return(false, nil).Maybe()
	bs.recPool.On("ReachableReceipts", mock.Anything, mock.Anything, mock.Anything).Return(
		func(resultID flow.Identifier, blockFilter mempoolAPIs.BlockFilter, receiptFilter mempoolAPIs.ReceiptFilter) []*flow.ExecutionReceipt {
			return bs.pendingReceipts
		},
		nil,
	)
	// initialize the builder
	bs.build, err = NewBuilder(
		noopMetrics,
		bs.db,
		bs.state,
		bs.headerDB,
		bs.sealDB,
		bs.indexDB,
		bs.blockDB,
		bs.resultDB,
		bs.receiptsDB,
		bs.guarPool,
		bs.sealPool,
		bs.recPool,
		noopTracer,
	)
	require.NoError(bs.T(), err)
	bs.build.cfg.expiry = 11
}
// TearDownTest closes the temporary badger database and deletes its directory.
func (bs *BuilderSuite) TearDownTest() {
	bs.Assert().NoError(bs.db.Close())
	bs.Assert().NoError(os.RemoveAll(bs.dir))
}
// TestPayloadEmptyValid checks that, with every mempool empty, the builder
// produces a payload containing neither guarantees nor seals.
func (bs *BuilderSuite) TestPayloadEmptyValid() {
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.Empty(bs.T(), bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool")
	assert.Empty(bs.T(), bs.assembled.Seals, "should have no seals in payload with empty mempool")
}
// TestPayloadGuaranteeValid checks that guarantees from the mempool, all
// referencing a known and recent block, are included in the payload.
func (bs *BuilderSuite) TestPayloadGuaranteeValid() {
	// populate the guarantee mempool with 16 entries referencing the finalized block
	bs.pendingGuarantees = unittest.CollectionGuaranteesFixture(16, unittest.WithCollRef(bs.finalID))
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.ElementsMatch(bs.T(), bs.pendingGuarantees, bs.assembled.Guarantees, "should have guarantees from mempool in payload")
}
// TestPayloadGuaranteeDuplicate checks that guarantees which already appear in
// an ancestor block's payload index are excluded from the new payload.
func (bs *BuilderSuite) TestPayloadGuaranteeDuplicate() {
	// four guarantees that are genuinely new
	valid := unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(bs.finalID))

	// twelve guarantees that we plant into the payload index of random
	// ancestor blocks on the fork, making them duplicates
	forkBlocks := append(bs.finalizedBlockIDs, bs.pendingBlockIDs...)
	duplicated := unittest.CollectionGuaranteesFixture(12, unittest.WithCollRef(bs.finalID))
	for _, dup := range duplicated {
		target := forkBlocks[rand.Intn(len(forkBlocks))]
		idx := bs.index[target]
		idx.CollectionIDs = append(idx.CollectionIDs, dup.ID())
		bs.index[target] = idx
	}

	// offer all sixteen guarantees to the builder via the mempool
	bs.pendingGuarantees = append(valid, duplicated...)
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.ElementsMatch(bs.T(), valid, bs.assembled.Guarantees, "should have valid guarantees from mempool in payload")
}
// TestPayloadGuaranteeReferenceUnknown checks that guarantees whose reference
// block is unknown to the storage layer are excluded from the payload.
func (bs *BuilderSuite) TestPayloadGuaranteeReferenceUnknown() {
	// 12 guarantees referencing the known finalized block
	valid := unittest.CollectionGuaranteesFixture(12, unittest.WithCollRef(bs.finalID))
	// 4 guarantees referencing a block ID that is not in storage
	unknown := unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(unittest.IdentifierFixture()))

	bs.pendingGuarantees = append(valid, unknown...)
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.ElementsMatch(bs.T(), valid, bs.assembled.Guarantees, "should have valid from mempool in payload")
}
// TestPayloadGuaranteeReferenceExpired checks that guarantees whose reference
// block is older than the expiry window are excluded from the payload.
func (bs *BuilderSuite) TestPayloadGuaranteeReferenceExpired() {
	// 12 guarantees referencing the known, recent finalized block
	valid := unittest.CollectionGuaranteesFixture(12, unittest.WithCollRef(bs.finalID))

	// 4 guarantees referencing a header 12 heights below [final] — beyond the
	// builder's expiry of 11 set in SetupTest
	header := unittest.BlockHeaderFixture()
	header.Height = bs.headers[bs.finalID].Height - 12
	bs.headers[header.ID()] = header
	expired := unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(header.ID()))

	bs.pendingGuarantees = append(valid, expired...)
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.ElementsMatch(bs.T(), valid, bs.assembled.Guarantees, "should have valid from mempool in payload")
}
// TestPayloadSeals_AllValid checks that the builder seals as many blocks as
// possible (happy path):
//
//	[first] <- [F0] <- [F1] <- [F2] <- [F3] <- [final] <- [A0] <- [A1] <- [A2] <- [A3] <- [parent]
//
// With candidate seals for [F0] ... [A2] in the mempool (the standard suite
// setup), the builder should include the full chain of seals. Block [A3] has
// no candidate seal: its result is incorporated in [parent], which has no
// child yet, so verifiers lack the source of randomness needed to check it.
func (bs *BuilderSuite) TestPayloadSeals_AllValid() {
	// feed the complete, valid chain of candidate seals to the mempool mock
	bs.pendingSeals = bs.irsMap

	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.Empty(bs.T(), bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool")
	assert.ElementsMatch(bs.T(), bs.chain, bs.assembled.Seals, "should have included valid chain of seals")
}
// TestPayloadSeals_Limit verifies that the builder never includes more seals
// than the configured maxSealCount, even when more are available.
func (bs *BuilderSuite) TestPayloadSeals_Limit() {
	// the mempool holds the full valid chain of candidate seals
	bs.pendingSeals = bs.irsMap

	// cap seal inclusion below the mempool size
	limit := uint(2)
	bs.build.cfg.maxSealCount = limit

	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.Empty(bs.T(), bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool")
	assert.Equal(bs.T(), bs.chain[:limit], bs.assembled.Seals, "should have excluded seals above maxSealCount")
}
// TestPayloadSeals_OnlyFork checks that the builder only includes candidate
// seals for blocks on the fork it is extending, and ignores sealable blocks
// living on competing forks.
func (bs *BuilderSuite) TestPayloadSeals_OnlyFork() {
	// The suite setup created the single fork
	//   [first] <- [F0] <- [F1] <- [F2] <- [F3] <- [final] <- [A0] <- [A1] <- [A2] ..
	// Here we graft a second fork onto [final]:              ^
	//                                                        └--- [B0] <- [B1] <- ....<- [B6] <- [B7]
	head := bs.blocks[bs.finalID]
	for i := 0; i < 8; i++ {
		// Only [B0]..[B5] receive candidate seals (for their parents): for
		// verifiers to check a result R they need a source of randomness for the
		// block _incorporating_ R; [B6]'s result is incorporated in [B7], which
		// has no child yet.
		head = bs.createAndRecordBlock(head, i < 6)
	}
	bs.pendingSeals = bs.irsMap

	// build on top of the B-fork
	_, err := bs.build.BuildOn(head.ID(), bs.setter)
	require.NoError(bs.T(), err)

	// expected seals: [F0] <- ... <- [final] <- [B0] <- ... <- [B5]
	// bs.chain holds seals for [F0]...[A2] first, then the seals created for the
	// B-fork, hence the [:4] and [8:] slices below
	assert.Equal(bs.T(), 10, len(bs.assembled.Seals), "unexpected number of seals")
	assert.ElementsMatch(bs.T(), bs.chain[:4], bs.assembled.Seals[:4], "should have included only valid chain of seals")
	assert.ElementsMatch(bs.T(), bs.chain[8:], bs.assembled.Seals[4:], "should have included only valid chain of seals")
	assert.Empty(bs.T(), bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool")
}
// TestPayloadSeals_EnforceGap checks that builder leaves a 1-block gap between block incorporating the result
// and the block sealing the result. Without this gap, some nodes might not be able to compute the Verifier
// assignment for the seal and therefore reject the block. This edge case only occurs in a very specific situation:
//
//	┌---- [A5] (orphaned fork)
//	v
//	...<- [B0] <- [B1] <- [B2] <- [B3] <- [B4{incorporates result R for B1}] <- ░newBlock░
//
// SCENARIO:
//   - block B0 is sealed
//     Proposer for ░newBlock░ knows block A5. Hence, it knows a QC for block B4, which contains the Source Of Randomness (SOR) for B4.
//     Therefore, the proposer can construct the verifier assignment for [B4{incorporates result R for B1}]
//   - Assume that verification was fast enough, so the proposer has sufficient approvals for result R.
//     Therefore, the proposer has a candidate seal, sealing result R for block B4, in its mempool.
//
// Replica trying to verify ░newBlock░:
//
//   - Assume that the replica does _not_ know A5. Therefore, it _cannot_ compute the verifier assignment for B4.
//
// Problem: If the proposer included the seal for B1, the replica could not check it.
// Solution: There must be a gap between the block incorporating the result (here B4) and
// the block sealing the result. A gap of one block is sufficient.
//
//	┌---- [A5] (orphaned fork)
//	v
//	...<- [B0] <- [B1] <- [B2] <- [B3] <- [B4{incorporates result R for B1}] <- [B5] <- [B6{seals B1}]
//	                                                                         ~~~~~~
//	                                                                           gap
//
// We test the two distinct cases:
//
//	(i)  Builder does _not_ include seal for B1 when constructing block B5
//	(ii) Builder _includes_ seal for B1 when constructing block B6
func (bs *BuilderSuite) TestPayloadSeals_EnforceGap() {
	// we use bs.parentID as block B0
	b0result := bs.resultForBlock[bs.parentID]
	b0seal := unittest.Seal.Fixture(unittest.Seal.WithResult(b0result))
	// create blocks B1 to B4:
	b1 := bs.createAndRecordBlock(bs.blocks[bs.parentID], true)
	bchain := unittest.ChainFixtureFrom(3, b1.Header) // creates blocks b2, b3, b4
	b4 := bchain[2]
	// Incorporate result for block B1 into payload of block B4
	resultB1 := bs.resultForBlock[b1.ID()]
	receiptB1 := unittest.ExecutionReceiptFixture(unittest.WithResult(resultB1))
	b4.SetPayload(
		flow.Payload{
			Results:  []*flow.ExecutionResult{&receiptB1.ExecutionResult},
			Receipts: []*flow.ExecutionReceiptMeta{receiptB1.Meta()},
		})
	// add blocks B2, B3, B4, A5 to the mocked storage layer (block b0 and b1 are already added):
	a5 := unittest.BlockWithParentFixture(b4.Header)
	for _, b := range append(bchain, a5) {
		bs.storeBlock(b)
	}
	// candidate seal mempool holds only the seal for B1, incorporated in B4
	bs.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal)
	b1seal := storeSealForIncorporatedResult(resultB1, b4.ID(), bs.pendingSeals)
	// fresh seals-storage mock: each subtest registers its own HighestInFork
	// expectation, reporting B0 as the latest seal on the respective fork
	bs.sealDB = &storage.Seals{}
	bs.build.seals = bs.sealDB
	// case (i): B4 incorporates B1's result, so building directly on B4 must
	// not seal B1 (no gap yet)
	bs.T().Run("Build on top of B4 and check that no seals are included", func(t *testing.T) {
		bs.sealDB.On("HighestInFork", b4.ID()).Return(b0seal, nil)
		_, err := bs.build.BuildOn(b4.ID(), bs.setter)
		require.NoError(t, err)
		bs.recPool.AssertExpectations(t)
		require.Empty(t, bs.assembled.Seals, "should not include any seals")
	})
	// case (ii): with B5 in between, the 1-block gap exists and B1's seal is included
	bs.T().Run("Build on top of B5 and check that seals for B1 is included", func(t *testing.T) {
		b5 := unittest.BlockWithParentFixture(b4.Header) // creating block b5
		bs.storeBlock(b5)
		bs.sealDB.On("HighestInFork", b5.ID()).Return(b0seal, nil)
		_, err := bs.build.BuildOn(b5.ID(), bs.setter)
		require.NoError(t, err)
		bs.recPool.AssertExpectations(t)
		require.Equal(t, 1, len(bs.assembled.Seals), "only seal for B1 expected")
		require.Equal(t, b1seal.Seal, bs.assembled.Seals[0])
	})
}
// TestPayloadSeals_Duplicate verifies that the builder does not re-include
// seals for blocks that are already sealed on the fork:
//
//	... <- [F0] <- [F1] <- [F2] <- [F3] <- [A0] <- [A1] <- [A2] <- [A3]
//
// With [F0] ... [F3] already sealed (but their candidate seals still in the
// mempool), only seals for [A0] ... [A3] should be included.
func (bs *BuilderSuite) TestPayloadSeals_Duplicate() {
	// pretend the first n blocks are already sealed by having the seals storage
	// report bs.chain[n-1] as the highest seal in the fork
	n := 4
	mockSealDB := &storage.Seals{}
	mockSealDB.On("HighestInFork", mock.Anything).Return(bs.chain[n-1], nil)
	bs.build.seals = mockSealDB

	// candidate seals for all blocks [F0], ..., [A3] remain in the mempool
	bs.pendingSeals = bs.irsMap
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.Equal(bs.T(), bs.chain[n:], bs.assembled.Seals, "should have rejected duplicate seals")
}
// TestPayloadSeals_MissingNextSeal checks the fork
//
//	[S] <- [F0] <- [F1] <- [F2] <- [F3] <- [A0] <- [A1] <- [A2] <- [A3]
//
// where [S] is sealed and the candidate seal for the very next block [F0] is
// missing from the mempool. Since seals must form an unbroken chain, the
// builder should include no seals at all.
func (bs *BuilderSuite) TestPayloadSeals_MissingNextSeal() {
	// drop the candidate seal for block [F0]
	missing := bs.irsList[0]
	delete(bs.irsMap, missing.ID())
	bs.pendingSeals = bs.irsMap

	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.Empty(bs.T(), bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool")
	assert.Empty(bs.T(), bs.assembled.Seals, "should not have included any seals from cutoff chain")
}
// TestPayloadSeals_MissingInterimSeal checks the fork
//
//	[S] <- [F0] <- [F1] <- [F2] <- [F3] <- [A0] <- [A1] <- [A2] <- [A3]
//
// where [S] is sealed and a candidate seal in the middle of the chain
// (irsList[3]) is missing from the mempool. The builder should include only
// the unbroken prefix of the seal chain, i.e. seals for [F0], [F1], [F2].
func (bs *BuilderSuite) TestPayloadSeals_MissingInterimSeal() {
	// drop an interim candidate seal, breaking the chain after the third seal
	missing := bs.irsList[3]
	delete(bs.irsMap, missing.ID())
	bs.pendingSeals = bs.irsMap

	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	require.NoError(bs.T(), err)
	assert.Empty(bs.T(), bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool")
	assert.ElementsMatch(bs.T(), bs.chain[:3], bs.assembled.Seals, "should have included only beginning of broken chain")
}
// TestValidatePayloadSeals_ExecutionForks checks how the builder's seal-inclusion logic
// handles execution forks.
//
// we have the chain in storage:
//
// F <- A{Result[F]_1, Result[F]_2, ReceiptMeta[F]_1, ReceiptMeta[F]_2}
// <- B{Result[A]_1, Result[A]_2, ReceiptMeta[A]_1, ReceiptMeta[A]_2}
// <- C{Result[B]_1, Result[B]_2, ReceiptMeta[B]_1, ReceiptMeta[B]_2}
// <- D{Seal for Result[F]_1}
//
// here F is the latest finalized block (with ID bs.finalID)
//
// Note that we are explicitly testing the handling of an execution fork that
// was incorporated _before_ the seal
//
// Blocks: F <----------- A <----------- B
// Results: Result[F]_1 <- Result[A]_1 <- Result[B]_1 :: the root of this execution tree is sealed
// Result[F]_2 <- Result[A]_2 <- Result[B]_2 :: the root of this execution tree conflicts with sealed result
//
// The builder is tasked with creating the payload for block X:
//
// F <- A{..} <- B{..} <- C{..} <- D{..} <- X
//
// We test the two distinct cases:
//
// (i) verify that execution fork conflicting with sealed result is not sealed
// (ii) verify that multiple execution forks are properly handled
func (bs *BuilderSuite) TestValidatePayloadSeals_ExecutionForks() {
	bs.build.cfg.expiry = 4 // reduce expiry so collection dedup algorithm doesn't walk past [lastSeal]

	// construct the block chain [F, A, B, C, D] plus two competing receipt chains
	blockF := bs.blocks[bs.finalID]
	blocks := []*flow.Block{blockF}
	blocks = append(blocks, unittest.ChainFixtureFrom(4, blockF.Header)...) // elements [F, A, B, C, D]
	receiptChain1 := unittest.ReceiptChainFor(blocks, unittest.ExecutionResultFixture()) // elements [Result[F]_1, Result[A]_1, Result[B]_1, ...]
	receiptChain2 := unittest.ReceiptChainFor(blocks, unittest.ExecutionResultFixture()) // elements [Result[F]_2, Result[A]_2, Result[B]_2, ...]
	for i := 1; i <= 3; i++ { // set payload for blocks A, B, C
		// each block incorporates the results+receipts for its parent from BOTH execution forks
		blocks[i].SetPayload(flow.Payload{
			Results: []*flow.ExecutionResult{&receiptChain1[i-1].ExecutionResult, &receiptChain2[i-1].ExecutionResult},
			Receipts: []*flow.ExecutionReceiptMeta{receiptChain1[i-1].Meta(), receiptChain2[i-1].Meta()},
		})
	}
	// block D seals Result[F]_1, i.e. execution fork 1 is the "honest" fork
	sealedResult := receiptChain1[0].ExecutionResult
	sealF := unittest.Seal.Fixture(unittest.Seal.WithResult(&sealedResult))
	blocks[4].SetPayload(flow.Payload{ // set payload for block D
		Seals: []*flow.Seal{sealF},
	})
	for i := 0; i <= 4; i++ {
		// we need to run this several times, as in each iteration we have _multiple_ execution chains.
		// In each iteration, we only manage to reconnect one additional height
		unittest.ReconnectBlocksAndReceipts(blocks, receiptChain1)
		unittest.ReconnectBlocksAndReceipts(blocks, receiptChain2)
	}
	for _, b := range blocks {
		bs.storeBlock(b)
	}

	// mock the seals storage so the builder sees sealF (for Result[F]_1) as the latest seal on any fork
	bs.sealDB = &storage.Seals{}
	bs.build.seals = bs.sealDB
	bs.sealDB.On("HighestInFork", mock.Anything).Return(sealF, nil)
	bs.resultByID[sealedResult.ID()] = &sealedResult

	bs.T().Run("verify that execution fork conflicting with sealed result is not sealed", func(t *testing.T) {
		bs.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal)
		// the only candidate seal is for Result[A]_2, which descends from the conflicting Result[F]_2
		storeSealForIncorporatedResult(&receiptChain2[1].ExecutionResult, blocks[2].ID(), bs.pendingSeals)

		_, err := bs.build.BuildOn(blocks[4].ID(), bs.setter)
		require.NoError(t, err)
		require.Empty(t, bs.assembled.Seals, "should not have included seal for conflicting execution fork")
	})

	bs.T().Run("verify that multiple execution forks are properly handled", func(t *testing.T) {
		bs.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal)
		// candidate seals on the sealed execution fork (chain 1) should be selected ...
		sealResultA_1 := storeSealForIncorporatedResult(&receiptChain1[1].ExecutionResult, blocks[2].ID(), bs.pendingSeals)
		sealResultB_1 := storeSealForIncorporatedResult(&receiptChain1[2].ExecutionResult, blocks[3].ID(), bs.pendingSeals)
		// ... while candidate seals on the conflicting fork (chain 2) must be ignored
		storeSealForIncorporatedResult(&receiptChain2[1].ExecutionResult, blocks[2].ID(), bs.pendingSeals)
		storeSealForIncorporatedResult(&receiptChain2[2].ExecutionResult, blocks[3].ID(), bs.pendingSeals)

		_, err := bs.build.BuildOn(blocks[4].ID(), bs.setter)
		require.NoError(t, err)
		require.ElementsMatch(t, []*flow.Seal{sealResultA_1.Seal, sealResultB_1.Seal}, bs.assembled.Seals, "valid fork should have been sealed")
	})
}
// TestPayloadReceipts_TraverseExecutionTreeFromLastSealedResult tests the receipt selection:
// Expectation: Builder should trigger ExecutionTree to search Execution Tree from
// last sealed result on respective fork.
//
// We test with the following main chain tree
//
// ┌-[X0] <- [X1{seals ..F4}]
// v
// [lastSeal] <- [F0] <- [F1] <- [F2] <- [F3] <- [F4] <- [A0] <- [A1{seals ..F2}] <- [A2] <- [A3]
//
// Where
// * blocks [lastSeal], [F0], ... [F4], [A0], ... [A3], are created by BuilderSuite
// * latest sealed block for a specific fork is provided by test-local seals storage mock
func (bs *BuilderSuite) TestPayloadReceipts_TraverseExecutionTreeFromLastSealedResult() {
	bs.build.cfg.expiry = 4 // reduce expiry so collection dedup algorithm doesn't walk past [lastSeal]
	x0 := bs.createAndRecordBlock(bs.blocks[bs.finalID], true)
	x1 := bs.createAndRecordBlock(x0, true)

	// set last sealed blocks (fixed local name: was `f2eal`, a typo for `f2Seal`):
	f2 := bs.blocks[bs.finalizedBlockIDs[2]]
	f2Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[f2.ID()]))
	f4Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[bs.finalID]))
	bs.sealDB = &storage.Seals{}
	bs.build.seals = bs.sealDB

	// reset receipts mempool to verify calls made by Builder
	bs.recPool = &mempool.ExecutionTree{}
	bs.recPool.On("Size").Return(uint(0)).Maybe()
	bs.build.recPool = bs.recPool

	// building on top of X0: latest sealed block in fork is [lastSeal]; expect search to start with sealed result
	bs.sealDB.On("HighestInFork", x0.ID()).Return(bs.lastSeal, nil)
	bs.recPool.On("ReachableReceipts", bs.lastSeal.ResultID, mock.Anything, mock.Anything).Return([]*flow.ExecutionReceipt{}, nil).Once()
	_, err := bs.build.BuildOn(x0.ID(), bs.setter)
	bs.Require().NoError(err)
	bs.recPool.AssertExpectations(bs.T())

	// building on top of X1 (which seals ..F4): latest sealed block in fork is [F4]; expect search to start with sealed result
	bs.sealDB.On("HighestInFork", x1.ID()).Return(f4Seal, nil)
	bs.recPool.On("ReachableReceipts", f4Seal.ResultID, mock.Anything, mock.Anything).Return([]*flow.ExecutionReceipt{}, nil).Once()
	_, err = bs.build.BuildOn(x1.ID(), bs.setter)
	bs.Require().NoError(err)
	bs.recPool.AssertExpectations(bs.T())

	// building on top of A3 (with ID bs.parentID): latest sealed block in fork is [F2] (sealed by A1);
	// expect search to start with sealed result
	bs.sealDB.On("HighestInFork", bs.parentID).Return(f2Seal, nil)
	bs.recPool.On("ReachableReceipts", f2Seal.ResultID, mock.Anything, mock.Anything).Return([]*flow.ExecutionReceipt{}, nil).Once()
	_, err = bs.build.BuildOn(bs.parentID, bs.setter)
	bs.Require().NoError(err)
	bs.recPool.AssertExpectations(bs.T())
}
// TestPayloadReceipts_IncludeOnlyReceiptsForCurrentFork tests the receipt selection:
// In this test, we check that the Builder provides a BlockFilter which only allows
// blocks on the fork, which we are extending. We construct the following chain tree:
//
// ┌--[X1] ┌-[Y2] ┌-- [A6]
// v v v
// <- [Final] <- [*B1*] <- [*B2*] <- [*B3*] <- [*B4{seals B1}*] <- [*B5*] <- ░newBlock░
// ^
// └-- [C3] <- [C4]
// ^--- [D4]
//
// Expectation: BlockFilter should pass blocks marked with star: B1, ..., B5
// All other blocks should be filtered out.
//
// Context:
// While the receipt selection itself is performed by the ExecutionTree, the Builder
// controls the selection by providing suitable BlockFilter and ReceiptFilter.
func (bs *BuilderSuite) TestPayloadReceipts_IncludeOnlyReceiptsForCurrentFork() {
	// construct the main fork B1 <- B2 <- B3 <- B4 <- B5, descending from the latest finalized block
	mainFork := make([]*flow.Block, 0, 5)
	parent := bs.blocks[bs.finalID]
	for i := 0; i < 5; i++ {
		parent = bs.createAndRecordBlock(parent, true)
		mainFork = append(mainFork, parent)
	}
	b1, b2, b3, b4, b5 := mainFork[0], mainFork[1], mainFork[2], mainFork[3], mainFork[4]
	// construct the conflicting branches hanging off the main fork
	x1 := bs.createAndRecordBlock(bs.blocks[bs.finalID], true)
	y2 := bs.createAndRecordBlock(b1, true)
	a6 := bs.createAndRecordBlock(b5, true)
	c3 := bs.createAndRecordBlock(b2, true)
	c4 := bs.createAndRecordBlock(c3, true)
	d4 := bs.createAndRecordBlock(c3, true)

	// set last sealed blocks:
	b1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[b1.ID()]))
	bs.sealDB = &storage.Seals{}
	bs.sealDB.On("HighestInFork", b5.ID()).Return(b1Seal, nil)
	bs.build.seals = bs.sealDB

	// setup mock to test the BlockFilter provided by Builder
	bs.recPool = &mempool.ExecutionTree{}
	bs.recPool.On("Size").Return(uint(0)).Maybe()
	bs.recPool.On("ReachableReceipts", b1Seal.ResultID, mock.Anything, mock.Anything).Run(
		func(args mock.Arguments) {
			blockFilter := args[1].(mempoolAPIs.BlockFilter)
			// every block on the fork being extended must pass the filter
			for _, h := range []*flow.Header{b1.Header, b2.Header, b3.Header, b4.Header, b5.Header} {
				assert.True(bs.T(), blockFilter(h))
			}
			// all blocks on conflicting forks (and the finalized parent itself) must be rejected
			for _, h := range []*flow.Header{bs.blocks[bs.finalID].Header, x1.Header, y2.Header, a6.Header, c3.Header, c4.Header, d4.Header} {
				assert.False(bs.T(), blockFilter(h))
			}
		}).Return([]*flow.ExecutionReceipt{}, nil).Once()
	bs.build.recPool = bs.recPool

	_, err := bs.build.BuildOn(b5.ID(), bs.setter)
	bs.Require().NoError(err)
	bs.recPool.AssertExpectations(bs.T())
}
// TestPayloadReceipts_SkipDuplicatedReceipts tests the receipt selection:
// Expectation: we check that the Builder provides a ReceiptFilter which
// filters out duplicated receipts.
// Comment:
// While the receipt selection itself is performed by the ExecutionTree, the Builder
// controls the selection by providing suitable BlockFilter and ReceiptFilter.
func (bs *BuilderSuite) TestPayloadReceipts_SkipDuplicatedReceipts() {
	// setup mock to test the ReceiptFilter provided by Builder
	bs.recPool = &mempool.ExecutionTree{}
	bs.recPool.On("Size").Return(uint(0)).Maybe()
	bs.recPool.On("ReachableReceipts", bs.lastSeal.ResultID, mock.Anything, mock.Anything).Run(
		func(args mock.Arguments) {
			receiptFilter := args[2].(mempoolAPIs.ReceiptFilter)
			// verify that all receipts already included in blocks are filtered out:
			for _, block := range bs.blocks {
				resultByID := block.Payload.Results.Lookup()
				for _, meta := range block.Payload.Receipts {
					// reconstruct the full receipt from its meta entry and the matching result
					result := resultByID[meta.ResultID]
					rcpt := flow.ExecutionReceiptFromMeta(*meta, *result)
					assert.False(bs.T(), receiptFilter(rcpt))
				}
			}
			// Verify that receipts for unsealed blocks, which are _not_ already incorporated are accepted:
			for _, block := range bs.blocks {
				if block.ID() != bs.firstID { // block with ID bs.firstID is already sealed
					// a freshly generated receipt is not a duplicate of anything incorporated
					rcpt := unittest.ReceiptForBlockFixture(block)
					assert.True(bs.T(), receiptFilter(rcpt))
				}
			}
		}).Return([]*flow.ExecutionReceipt{}, nil).Once()
	bs.build.recPool = bs.recPool
	_, err := bs.build.BuildOn(bs.parentID, bs.setter)
	bs.Require().NoError(err)
	bs.recPool.AssertExpectations(bs.T())
}
// TestPayloadReceipts_SkipReceiptsForSealedBlock tests the receipt selection:
// Expectation: we check that the Builder provides a ReceiptFilter which
// filters out _any_ receipt for the sealed block.
//
// Comment:
// While the receipt selection itself is performed by the ExecutionTree, the Builder
// controls the selection by providing suitable BlockFilter and ReceiptFilter.
func (bs *BuilderSuite) TestPayloadReceipts_SkipReceiptsForSealedBlock() {
// setup mock to test the ReceiptFilter provided by Builder
bs.recPool = &mempool.ExecutionTree{}
bs.recPool.On("Size").Return(uint(0)).Maybe()
bs.recPool.On("ReachableReceipts", bs.lastSeal.ResultID, mock.Anything, mock.Anything).Run(
func(args mock.Arguments) {
receiptFilter := args[2].(mempoolAPIs.ReceiptFilter)
// receipt for sealed block committing to same result as the sealed result
rcpt := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.firstID]))
assert.False(bs.T(), receiptFilter(rcpt))
// receipt for sealed block committing to different result as the sealed result