diff --git a/fn/func.go b/fn/func.go index 0514fd230..6880e65db 100644 --- a/fn/func.go +++ b/fn/func.go @@ -130,6 +130,18 @@ func All[T any](xs []T, pred func(T) bool) bool { return true } +// AllMapItems returns true if the passed predicate returns true for all items +// in the map. +func AllMapItems[T any, K comparable](xs map[K]T, pred func(T) bool) bool { + for i := range xs { + if !pred(xs[i]) { + return false + } + } + + return true +} + // Any returns true if the passed predicate returns true for any item in the // slice. func Any[T any](xs []T, pred func(T) bool) bool { @@ -148,6 +160,30 @@ func None[T any](xs []T, pred func(T) bool) bool { return !Any(xs, pred) } +// AnyMapItem returns true if the passed predicate returns true for any item in +// the map. +func AnyMapItem[T any, K comparable](xs map[K]T, pred func(T) bool) bool { + for i := range xs { + if pred(xs[i]) { + return true + } + } + + return false +} + +// NotAny returns true if the passed predicate returns false for all items in +// the slice. +func NotAny[T any](xs []T, pred func(T) bool) bool { + return !Any(xs, pred) +} + +// NotAnyMapItem returns true if the passed predicate returns false for all +// items in the map. +func NotAnyMapItem[T any, K comparable](xs map[K]T, pred func(T) bool) bool { + return !AnyMapItem(xs, pred) +} + // Count returns the number of items in the slice that match the predicate. func Count[T any](xs []T, pred func(T) bool) int { var count int @@ -161,6 +197,20 @@ func Count[T any](xs []T, pred func(T) bool) int { return count } +// CountMapItems returns the number of items in the map that match the +// predicate. +func CountMapItems[T any, K comparable](xs map[K]T, pred func(T) bool) int { + var count int + + for i := range xs { + if pred(xs[i]) { + count++ + } + } + + return count +} + // First returns the first item in the slice that matches the predicate, or an // error if none matches. func First[T any](xs []*T, pred func(*T) bool) (*T, error) { diff --git a/fn/iter.go b/fn/iter.go index 9e7c8dc23..c82d41639 100644 --- a/fn/iter.go +++ b/fn/iter.go @@ -7,10 +7,8 @@ package fn // This function can be used instead of the normal range loop to ensure that a // loop scoping bug isn't introduced. func ForEachErr[T any](s []T, f func(T) error) error { - for _, item := range s { - item := item - - if err := f(item); err != nil { + for i := range s { + if err := f(s[i]); err != nil { return err } } @@ -22,9 +20,17 @@ func ForEachErr[T any](s []T, f func(T) error) error { // This can be used to ensure that any normal for-loop don't run into bugs due // to loop variable scoping. func ForEach[T any](items []T, f func(T)) { - for _, item := range items { - item := item - f(item) + for i := range items { + f(items[i]) + } +} + +// ForEachMapItem is a generic implementation of a for-each (map with side +// effects). This can be used to ensure that any normal for-loop don't run into +// bugs due to loop variable scoping. +func ForEachMapItem[T any, K comparable](items map[K]T, f func(T)) { + for i := range items { + f(items[i]) } } @@ -38,6 +44,14 @@ func Enumerate[T any](items []T, f func(int, T)) { } } +// EnumerateMap is a generic enumeration function. The closure will be called +// for each key and item in the passed-in map. +func EnumerateMap[T any, K comparable](items map[K]T, f func(K, T)) { + for key := range items { + f(key, items[key]) + } +} + // MakeSlice is a generic function shorthand for making a slice out of a set // of elements. 
This can be used to avoid having to specify the type of the // slice as well as the types of the elements. diff --git a/itest/addrs_test.go b/itest/addrs_test.go index b04f556aa..b03ddfbc9 100644 --- a/itest/addrs_test.go +++ b/itest/addrs_test.go @@ -8,6 +8,7 @@ import ( tap "github.com/lightninglabs/taproot-assets" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/internal/test" + "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tappsbt" "github.com/lightninglabs/taproot-assets/taprpc" wrpc "github.com/lightninglabs/taproot-assets/taprpc/assetwalletrpc" @@ -40,10 +41,6 @@ func testAddresses(t *harnessTest) { // assets made above. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -79,12 +76,6 @@ func testAddresses(t *harnessTest) { // Eventually the event should be marked as confirmed. AssertAddrEvent(t.t, secondTapd, addr, 1, statusConfirmed) - // To complete the transfer, we'll export the proof from the - // sender and import it into the receiver for each asset set. - sendProof( - t, t.tapd, secondTapd, addr.ScriptKey, a.AssetGenesis, - ) - // Make sure we have imported and finalized all proofs. AssertNonInteractiveRecvComplete(t.t, secondTapd, idx+1) @@ -175,10 +166,6 @@ func testMultiAddress(t *harnessTest) { alice := t.tapd bob := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = alice - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) @@ -195,7 +182,12 @@ func testMultiAddress(t *harnessTest) { func testAddressAssetSyncer(t *harnessTest) { // We'll kick off the test by making a new node, without hooking it up // to any existing Universe server. - bob := setupTapdHarness(t.t, t, t.lndHarness.Bob, nil) + bob := setupTapdHarness( + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true + }, + ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) }() @@ -321,8 +313,9 @@ func testAddressAssetSyncer(t *harnessTest) { restartBobNoUniSync := func(disableSyncer bool) { require.NoError(t.t, bob.stop(!*noDelete)) bob = setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true params.addrAssetSyncerDisable = disableSyncer }, ) @@ -436,13 +429,11 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, // In order to force a split, we don't try to send the full asset. 
const sendAmt = 100 - var bobAddresses []*taprpc.Addr bobAddr1, err := bob.NewAddr(ctxt, &taprpc.NewAddrRequest{ AssetId: genInfo.AssetId, Amt: sendAmt, }) require.NoError(t.t, err) - bobAddresses = append(bobAddresses, bobAddr1) AssertAddrCreated(t.t, bob, mintedAsset, bobAddr1) bobAddr2, err := bob.NewAddr(ctxt, &taprpc.NewAddrRequest{ @@ -450,7 +441,6 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, Amt: sendAmt, }) require.NoError(t.t, err) - bobAddresses = append(bobAddresses, bobAddr2) AssertAddrCreated(t.t, bob, mintedAsset, bobAddr2) // To test that Alice can also receive to multiple addresses in a single @@ -492,14 +482,6 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, // this point, so the status should go to completed directly. AssertAddrEventByStatus(t.t, alice, statusCompleted, numRuns*2) - // To complete the transfer, we'll export the proof from the sender and - // import it into the receiver for each asset set. This should not be - // necessary for the sends to Alice, as she is both the sender and - // receiver and should detect the local proof once it's written to disk. - for i := range bobAddresses { - sendProof(t, alice, bob, bobAddresses[i].ScriptKey, genInfo) - } - // Make sure we have imported and finalized all proofs. AssertNonInteractiveRecvComplete(t.t, bob, numRuns*2) AssertNonInteractiveRecvComplete(t.t, alice, numRuns*2) @@ -531,6 +513,8 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, require.NoError(t.t, err) } +// sendProof manually exports a proof from the given source node and imports it +// using the development only ImportProof RPC on the destination node. func sendProof(t *harnessTest, src, dst *tapdHarness, scriptKey []byte, genInfo *taprpc.GenesisInfo) *tapdevrpc.ImportProofResponse { @@ -562,6 +546,85 @@ func sendProof(t *harnessTest, src, dst *tapdHarness, scriptKey []byte, return importResp } +// sendProofUniRPC manually exports a proof from the given source node and +// imports it using the universe related InsertProof RPC on the destination +// node. 
+func sendProofUniRPC(t *harnessTest, src, dst *tapdHarness, scriptKey []byte, + genInfo *taprpc.GenesisInfo) *unirpc.AssetProofResponse { + + ctxb := context.Background() + + var proofResp *taprpc.ProofFile + waitErr := wait.NoError(func() error { + resp, err := src.ExportProof(ctxb, &taprpc.ExportProofRequest{ + AssetId: genInfo.AssetId, + ScriptKey: scriptKey, + }) + if err != nil { + return err + } + + proofResp = resp + return nil + }, defaultWaitTimeout) + require.NoError(t.t, waitErr) + + t.Logf("Importing proof %x using InsertProof", proofResp.RawProofFile) + + f := proof.File{} + err := f.Decode(bytes.NewReader(proofResp.RawProofFile)) + require.NoError(t.t, err) + + lastProof, err := f.LastProof() + require.NoError(t.t, err) + + var lastProofBytes bytes.Buffer + err = lastProof.Encode(&lastProofBytes) + require.NoError(t.t, err) + asset := lastProof.Asset + + proofType := universe.ProofTypeTransfer + if asset.IsGenesisAsset() { + proofType = universe.ProofTypeIssuance + } + + uniID := universe.Identifier{ + AssetID: asset.ID(), + ProofType: proofType, + } + if asset.GroupKey != nil { + uniID.GroupKey = &asset.GroupKey.GroupPubKey + } + + rpcUniID, err := tap.MarshalUniID(uniID) + require.NoError(t.t, err) + + outpoint := &unirpc.Outpoint{ + HashStr: lastProof.AnchorTx.TxHash().String(), + Index: int32(lastProof.InclusionProof.OutputIndex), + } + + importResp, err := dst.InsertProof(ctxb, &unirpc.AssetProof{ + Key: &unirpc.UniverseKey{ + Id: rpcUniID, + LeafKey: &unirpc.AssetKey{ + Outpoint: &unirpc.AssetKey_Op{ + Op: outpoint, + }, + ScriptKey: &unirpc.AssetKey_ScriptKeyBytes{ + ScriptKeyBytes: scriptKey, + }, + }, + }, + AssetLeaf: &unirpc.AssetLeaf{ + Proof: lastProofBytes.Bytes(), + }, + }) + require.NoError(t.t, err) + + return importResp +} + // sendAssetsToAddr spends the given input asset and sends the amount specified // in the address to the Taproot output derived from the address. func sendAssetsToAddr(t *harnessTest, sender *tapdHarness, diff --git a/itest/aperture_harness.go b/itest/aperture_harness.go index 95500363a..e95061e97 100644 --- a/itest/aperture_harness.go +++ b/itest/aperture_harness.go @@ -27,7 +27,7 @@ type ApertureHarness struct { // NewApertureHarness creates a new instance of the aperture service. It returns // a harness which includes useful values for testing. -func NewApertureHarness(t *testing.T, port int) ApertureHarness { +func NewApertureHarness(t *testing.T, port int) *ApertureHarness { // Create a temporary directory for the aperture service to use. 
baseDir := filepath.Join(t.TempDir(), "aperture") err := os.MkdirAll(baseDir, os.ModePerm) @@ -55,7 +55,7 @@ func NewApertureHarness(t *testing.T, port int) ApertureHarness { } service := aperture.NewAperture(cfg) - return ApertureHarness{ + return &ApertureHarness{ ListenAddr: listenAddr, Service: service, } diff --git a/itest/assertions.go b/itest/assertions.go index 359ac508e..048a397ec 100644 --- a/itest/assertions.go +++ b/itest/assertions.go @@ -870,7 +870,7 @@ func AssertBalanceByID(t *testing.T, client taprpc.TaprootAssetsClient, } require.True(t, ok) - require.Equal(t, uint64(amt), uint64(balance.Balance)) + require.Equal(t, amt, balance.Balance) } // AssertBalanceByGroup asserts that the balance of a single asset group diff --git a/itest/collectible_split_test.go b/itest/collectible_split_test.go index f8fd17035..2f987e8e7 100644 --- a/itest/collectible_split_test.go +++ b/itest/collectible_split_test.go @@ -53,10 +53,6 @@ func testCollectibleSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/full_value_split_test.go b/itest/full_value_split_test.go index 75854b0ca..84afb2430 100644 --- a/itest/full_value_split_test.go +++ b/itest/full_value_split_test.go @@ -33,10 +33,6 @@ func testFullValueSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -88,8 +84,8 @@ func runFullValueSendTests(ctxt context.Context, t *harnessTest, alice, []uint64{0, fullAmount}, senderTransferIdx, senderTransferIdx+1, ) - _ = sendProof( - t, alice, bob, receiverAddr.ScriptKey, genInfo, + AssertNonInteractiveRecvComplete( + t.t, bob, senderTransferIdx+1, ) senderTransferIdx++ } else { @@ -108,8 +104,8 @@ func runFullValueSendTests(ctxt context.Context, t *harnessTest, alice, genInfo.AssetId, []uint64{0, fullAmount}, receiverTransferIdx, receiverTransferIdx+1, ) - _ = sendProof( - t, bob, alice, receiverAddr.ScriptKey, genInfo, + AssertNonInteractiveRecvComplete( + t.t, alice, receiverTransferIdx+1, ) receiverTransferIdx++ } diff --git a/itest/integration_test.go b/itest/integration_test.go index 41dcf7f81..3f2557c21 100644 --- a/itest/integration_test.go +++ b/itest/integration_test.go @@ -59,11 +59,10 @@ func TestTaprootAssetsDaemon(t *testing.T) { // The universe server and tapd client are both freshly // created and later discarded for each test run to // assure no state is taken over between runs. - tapdHarness, universeServer, proofCourier := - setupHarnesses( - t1, ht, lndHarness, - testCase.proofCourierType, - ) + tapdHarness, uniHarness, proofCourier := setupHarnesses( + t1, ht, lndHarness, + testCase.proofCourierType, + ) lndHarness.EnsureConnected( lndHarness.Alice, lndHarness.Bob, ) @@ -72,8 +71,8 @@ func TestTaprootAssetsDaemon(t *testing.T) { lndHarness.Bob.AddToLogf(logLine) ht := ht.newHarnessTest( - t1, lndHarness, universeServer, - tapdHarness, proofCourier, + t1, lndHarness, uniHarness, tapdHarness, + proofCourier, ) // Now we have everything to run the test case. 
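As a quick illustration of the new generic map helpers introduced in fn/func.go and fn/iter.go above, here is a minimal, hypothetical sketch; the balances map, its values and the predicates are invented for this example and are not part of the change:

package main

import (
	"fmt"

	"github.com/lightninglabs/taproot-assets/fn"
)

func main() {
	balances := map[string]uint64{"alice": 500, "bob": 0, "carol": 42}

	// AllMapItems: true only if every value satisfies the predicate.
	allFunded := fn.AllMapItems(balances, func(b uint64) bool {
		return b > 0
	})

	// AnyMapItem / NotAnyMapItem: existence checks over the values.
	anyEmpty := fn.AnyMapItem(balances, func(b uint64) bool {
		return b == 0
	})
	noneHuge := fn.NotAnyMapItem(balances, func(b uint64) bool {
		return b > 1_000_000
	})

	// CountMapItems: number of values matching the predicate.
	numFunded := fn.CountMapItems(balances, func(b uint64) bool {
		return b > 0
	})

	// ForEachMapItem visits every value, EnumerateMap every key/value pair.
	fn.ForEachMapItem(balances, func(b uint64) {
		fmt.Println(b)
	})
	fn.EnumerateMap(balances, func(owner string, b uint64) {
		fmt.Printf("%s: %d\n", owner, b)
	})

	fmt.Println(allFunded, anyEmpty, noneHuge, numFunded)
}
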
diff --git a/itest/mint_batch_stress_test.go b/itest/mint_batch_stress_test.go index b4103ebc1..124ae44ea 100644 --- a/itest/mint_batch_stress_test.go +++ b/itest/mint_batch_stress_test.go @@ -57,7 +57,7 @@ func testMintBatchNStressTest(t *harnessTest, batchSize int, // If we create a second tapd instance and sync the universe state, // the synced tree should match the source tree. bob := setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) diff --git a/itest/multi_asset_group_test.go b/itest/multi_asset_group_test.go index 9b4add64d..3dae13188 100644 --- a/itest/multi_asset_group_test.go +++ b/itest/multi_asset_group_test.go @@ -97,10 +97,6 @@ func testMintMultiAssetGroups(t *harnessTest) { // ensure that they can be sent and received correctly. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = 4 - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -129,10 +125,6 @@ func testMintMultiAssetGroups(t *harnessTest) { normalMember.AssetGenesis.AssetId, []uint64{0, normalMember.Amount}, 0, 1, ) - _ = sendProof( - t, t.tapd, secondTapd, bobNormalAddr.ScriptKey, - normalMemberGenInfo, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) AssertBalanceByGroup( @@ -170,10 +162,6 @@ func testMintMultiAssetGroups(t *harnessTest) { collectMember.AssetGenesis.AssetId, []uint64{0, collectMember.Amount}, 1, 2, ) - sendProof( - t, t.tapd, secondTapd, bobCollectAddr.ScriptKey, - collectMemberGenInfo, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 2) AssertBalanceByGroup( @@ -333,10 +321,6 @@ func testMultiAssetGroupSend(t *harnessTest) { // assets made above. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = groupCount - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/psbt_test.go b/itest/psbt_test.go index d587f2097..245ac0566 100644 --- a/itest/psbt_test.go +++ b/itest/psbt_test.go @@ -44,10 +44,6 @@ func testPsbtScriptHashLockSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -132,6 +128,10 @@ func testPsbtScriptHashLockSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, bob, sendResp, genInfo.AssetId, []uint64{numUnits / 2, numUnits / 2}, 0, 1, ) + + // This is an interactive/PSBT based transfer, so we do need to manually + // send the proof from the sender to the receiver because the proof + // courier address gets lost in the address->PSBT conversion. _ = sendProof(t, bob, alice, aliceAddr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, alice, 1) @@ -166,10 +166,6 @@ func testPsbtScriptCheckSigSend(t *harnessTest) { // serve as the node which'll receive the assets. 
secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -258,6 +254,10 @@ func testPsbtScriptCheckSigSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, bob, sendResp, genInfo.AssetId, []uint64{numUnits / 2, numUnits / 2}, 0, 1, ) + + // This is an interactive/PSBT based transfer, so we do need to manually + // send the proof from the sender to the receiver because the proof + // courier address gets lost in the address->PSBT conversion. _ = sendProof(t, bob, alice, aliceAddr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, alice, 1) @@ -307,10 +307,6 @@ func testPsbtNormalInteractiveFullValueSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -358,10 +354,6 @@ func testPsbtGroupedInteractiveFullValueSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -438,6 +430,9 @@ func runPsbtInteractiveFullValueSendTest(ctxt context.Context, t *harnessTest, sendResp, genInfo.AssetId, amounts, i/2, (i/2)+1, numOutputs, ) + + // This is an interactive transfer, so we do need to manually + // send the proof from the sender to the receiver. _ = sendProof( t, sender, receiver, receiverScriptKey.PubKey.SerializeCompressed(), genInfo, @@ -515,10 +510,6 @@ func testPsbtNormalInteractiveSplitSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -566,10 +557,6 @@ func testPsbtGroupedInteractiveSplitSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -656,6 +643,9 @@ func runPsbtInteractiveSplitSendTest(ctxt context.Context, t *harnessTest, []uint64{sendAmt, changeAmt}, i/2, (i/2)+1, numOutputs, ) + + // This is an interactive transfer, so we do need to manually + // send the proof from the sender to the receiver. _ = sendProof( t, sender, receiver, receiverScriptKey.PubKey.SerializeCompressed(), genInfo, @@ -721,10 +711,6 @@ func testPsbtInteractiveTapscriptSibling(t *harnessTest) { // serve as the node which'll receive the assets. 
secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(p *tapdHarnessParams) { - p.startupSyncNode = t.tapd - p.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -779,6 +765,9 @@ func testPsbtInteractiveTapscriptSibling(t *harnessTest) { t.t, t.lndHarness.Miner.Client, alice, sendResp, genInfo.AssetId, []uint64{sendAmt, changeAmt}, 0, 1, 2, ) + + // This is an interactive transfer, so we do need to manually send the + // proof from the sender to the receiver. _ = sendProof( t, alice, bob, receiverScriptKey.PubKey.SerializeCompressed(), genInfo, @@ -846,10 +835,6 @@ func testPsbtMultiSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -927,6 +912,9 @@ func testPsbtMultiSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, sender, sendResp, genInfo.AssetId, outputAmounts, 0, 1, numOutputs, ) + + // This is an interactive transfer, so we do need to manually send the + // proof from the sender to the receiver. _ = sendProof( t, sender, receiver, receiverScriptKey1.PubKey.SerializeCompressed(), genInfo, @@ -1034,7 +1022,6 @@ func sendToTapscriptAddr(ctx context.Context, t *harnessTest, alice, t.t, t.lndHarness.Miner.Client, alice, sendResp, genInfo.AssetId, []uint64{changeUnits, numUnits}, 0, 1, ) - _ = sendProof(t, alice, bob, bobAddr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, bob, 1) } @@ -1059,7 +1046,6 @@ func sendAssetAndAssert(ctx context.Context, t *harnessTest, alice, genInfo.AssetId, []uint64{change, numUnits}, outTransferIdx, numOutTransfers, ) - _ = sendProof(t, alice, bob, bobAddr.ScriptKey, genInfo) // There are now two receive events (since only non-interactive sends // appear in that RPC output). diff --git a/itest/re-issuance_test.go b/itest/re-issuance_test.go index b92d27b85..a043fb1df 100644 --- a/itest/re-issuance_test.go +++ b/itest/re-issuance_test.go @@ -51,13 +51,8 @@ func testReIssuance(t *harnessTest) { // Create a second node, which will have no information about previously // minted assets or asset groups. - numTotalAssets := len(normalGroupGen) + len(collectGroupGen) secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = numTotalAssets - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -78,10 +73,7 @@ func testReIssuance(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, firstCollectSend, collectGenInfo.AssetId, []uint64{0, 1}, 0, 1, ) - sendProof( - t, t.tapd, secondTapd, collectGroupAddr.ScriptKey, - collectGenInfo, - ) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) // Check the state of both nodes. The first node should show one // zero-value transfer representing the send of the collectible. @@ -107,10 +99,7 @@ func testReIssuance(t *harnessTest) { normalGenInfo.AssetId, []uint64{normalGroupMintHalf, normalGroupMintHalf}, 1, 2, ) - sendProof( - t, t.tapd, secondTapd, normalGroupAddr.ScriptKey, - normalGenInfo, - ) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 2) // Reissue one more collectible and half the original mint amount for // the normal asset. 
@@ -186,10 +175,7 @@ func testReIssuance(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, secondCollectSend, collectReissueInfo.AssetId, []uint64{0, 1}, 2, 3, ) - sendProof( - t, t.tapd, secondTapd, collectReissueAddr.ScriptKey, - collectReissueInfo, - ) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 3) // The second node should show two groups, with two assets in // the collectible group and a total balance of 2 for that group. @@ -220,10 +206,7 @@ func testReIssuance(t *harnessTest) { t.t, secondTapd.ht.lndHarness.Miner.Client, secondTapd, thirdCollectSend, collectGenInfo.AssetId, []uint64{0, 1}, 0, 1, ) - sendProof( - t, secondTapd, t.tapd, collectReissueAddr.ScriptKey, - collectReissueInfo, - ) + AssertNonInteractiveRecvComplete(t.t, t.tapd, 1) // The collectible balance on the minting node should be 1, and there // should still be only two groups. @@ -353,10 +336,6 @@ func testMintWithGroupKeyErrors(t *harnessTest) { // minted assets or asset groups. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(collectGroupGen) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -382,12 +361,8 @@ func testMintWithGroupKeyErrors(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, collectSend, collectGenInfo.AssetId, []uint64{0, 1}, 0, 1, ) - sendProof( - t, t.tapd, secondTapd, collectGroupAddr.ScriptKey, - collectGenInfo, - ) - // A reissuance with the second node should still fail because the + // A re-issuance with the second node should still fail because the // group key was not created by that node. _, err = secondTapd.MintAsset(ctxb, reissueRequest) require.ErrorContains(t.t, err, "can't sign with group key") diff --git a/itest/re-org_test.go b/itest/re-org_test.go index b77762f5a..21e29a79e 100644 --- a/itest/re-org_test.go +++ b/itest/re-org_test.go @@ -53,10 +53,6 @@ func testReOrgMint(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(assetList) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -139,10 +135,6 @@ func testReOrgSend(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(assetList) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -170,9 +162,6 @@ func testReOrgSend(t *harnessTest) { sendAssetGen.AssetId, []uint64{sendAsset.Amount - sendAmount, sendAmount}, 0, 1, ) - _ = sendProof( - t, t.tapd, secondTapd, bobAddr.ScriptKey, sendAssetGen, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) initialBlockHash := initialBlock.BlockHash() @@ -265,10 +254,6 @@ func testReOrgMintAndSend(t *harnessTest) { // node will be used to synchronize universe state. 
secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(assetList) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -291,9 +276,6 @@ func testReOrgMintAndSend(t *harnessTest) { sendAssetGen.AssetId, []uint64{sendAsset.Amount - sendAmount, sendAmount}, 0, 1, ) - _ = sendProof( - t, t.tapd, secondTapd, bobAddr.ScriptKey, sendAssetGen, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) initialBlockHash := initialBlock.BlockHash() diff --git a/itest/round_trip_send_test.go b/itest/round_trip_send_test.go index 784ca1571..d312eed21 100644 --- a/itest/round_trip_send_test.go +++ b/itest/round_trip_send_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/hex" - "time" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" @@ -42,10 +41,6 @@ func testRoundTripSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -81,7 +76,7 @@ func testRoundTripSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, sendResp, genInfo.AssetId, []uint64{bobAmt, bobAmt}, 0, 1, ) - _ = sendProof(t, t.tapd, secondTapd, bobAddr.ScriptKey, genInfo) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) // Now, Alice will request half of the assets she sent to Bob. aliceAddr, err := t.tapd.NewAddr(ctxb, &taprpc.NewAddrRequest{ @@ -101,10 +96,7 @@ func testRoundTripSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, secondTapd, sendResp, genInfo.AssetId, []uint64{aliceAmt, aliceAmt}, 0, 1, ) - _ = sendProof(t, secondTapd, t.tapd, aliceAddr.ScriptKey, genInfo) - - // Give both nodes some time to process the final transfer. - time.Sleep(time.Second * 1) + AssertNonInteractiveRecvComplete(t.t, t.tapd, 1) // Check the final state of both nodes. Each node should list // one transfer, and Alice should have 3/4 of the total units. diff --git a/itest/send_test.go b/itest/send_test.go index 2a44335e1..5fdb8603c 100644 --- a/itest/send_test.go +++ b/itest/send_test.go @@ -21,6 +21,10 @@ import ( "github.com/stretchr/testify/require" ) +var ( + transferTypeSend = taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND +) + // testBasicSendUnidirectional tests that we can properly send assets back and // forth between nodes. func testBasicSendUnidirectional(t *harnessTest) { @@ -88,10 +92,6 @@ func testBasicSendUnidirectional(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -215,8 +215,6 @@ func testRestartReceiverCheckBalance(t *harnessTest) { recvTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) params.custodianProofRetrievalDelay = &custodianProofRetrievalDelay }, ) @@ -458,10 +456,6 @@ func testBasicSendPassiveAsset(t *harnessTest) { // Set up a new node that will serve as the receiving node. 
recvTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, recvTapd.stop(!*noDelete)) @@ -629,7 +623,7 @@ func testReattemptFailedSendHashmailCourier(t *harnessTest) { switch eventTyped := event.Event.(type) { case *taprpc.SendAssetEvent_ProofTransferBackoffWaitEvent: ev := eventTyped.ProofTransferBackoffWaitEvent - if ev.TransferType != taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND { + if ev.TransferType != transferTypeSend { return false } @@ -736,7 +730,7 @@ func testReattemptFailedSendUniCourier(t *harnessTest) { switch eventTyped := event.Event.(type) { case *taprpc.SendAssetEvent_ProofTransferBackoffWaitEvent: ev := eventTyped.ProofTransferBackoffWaitEvent - if ev.TransferType != taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND { + if ev.TransferType != transferTypeSend { return false } @@ -1010,7 +1004,7 @@ func testOfflineReceiverEventuallyReceives(t *harnessTest) { // node. We therefore expect to receive // deliver transfer type backoff wait events // for sending transfers. - if ev.TransferType != taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND { + if ev.TransferType != transferTypeSend { return false } @@ -1212,10 +1206,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { // node. Sync the new node with the primary node. bobTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, bobTapd.stop(!*noDelete)) @@ -1240,7 +1230,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { genInfo.AssetId, []uint64{4000, 1000}, 0, 1, ) - _ = sendProof(t, t.tapd, bobTapd, addr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, bobTapd, 1) // Second of two send events from minting node to the secondary node. @@ -1261,7 +1250,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { genInfo.AssetId, []uint64{0, 4000}, 1, 2, ) - _ = sendProof(t, t.tapd, bobTapd, addr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, bobTapd, 2) t.Logf("Two separate send events complete, now attempting to send " + @@ -1285,7 +1273,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { genInfo.AssetId, []uint64{0, 5000}, 0, 1, ) - _ = sendProof(t, bobTapd, t.tapd, addr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, t.tapd, 1) } @@ -1308,10 +1295,6 @@ func testSendMultipleCoins(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -1388,12 +1371,70 @@ func testSendMultipleCoins(t *harnessTest) { // Now we confirm the 5 transfers and make sure they complete as // expected. _ = MineBlocks(t.t, t.lndHarness.Miner.Client, 1, 5) - for _, addr := range bobAddrs { - _ = sendProof(t, t.tapd, secondTapd, addr.ScriptKey, genInfo) - } AssertNonInteractiveRecvComplete(t.t, secondTapd, 5) } +// testSendNoCourierUniverseImport tests that we can send assets to a node that +// has no courier, and then manually transfer the proof to the receiving using +// the universe proof import RPC method. 
+func testSendNoCourierUniverseImport(t *harnessTest) { + ctxb := context.Background() + + // First, we'll make a normal assets with enough units. + rpcAssets := MintAssetsConfirmBatch( + t.t, t.lndHarness.Miner.Client, t.tapd, + []*mintrpc.MintAssetRequest{simpleAssets[0]}, + ) + + firstAsset := rpcAssets[0] + genInfo := firstAsset.AssetGenesis + + // Now that we have the asset created, we'll make a new node that'll + // serve as the node which'll receive the assets. We turn off the proof + // courier by supplying a dummy implementation. + secondTapd := setupTapdHarness( + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.proofCourier = &proof.MockProofCourier{} + }, + ) + defer func() { + require.NoError(t.t, secondTapd.stop(!*noDelete)) + }() + + // Next, we'll attempt to transfer some amount of assets[0] to the + // receiving node. + numUnitsSend := uint64(1200) + + // Get a new address (which accepts the first asset) from the + // receiving node. + receiveAddr, err := secondTapd.NewAddr(ctxb, &taprpc.NewAddrRequest{ + AssetId: genInfo.AssetId, + Amt: numUnitsSend, + }) + require.NoError(t.t, err) + AssertAddrCreated(t.t, secondTapd, firstAsset, receiveAddr) + + // Send the assets to the receiving node. + sendResp := sendAssetsToAddr(t, t.tapd, receiveAddr) + + // Assert that the outbound transfer was confirmed. + expectedAmtAfterSend := firstAsset.Amount - numUnitsSend + ConfirmAndAssertOutboundTransfer( + t.t, t.lndHarness.Miner.Client, t.tapd, sendResp, + genInfo.AssetId, + []uint64{expectedAmtAfterSend, numUnitsSend}, 0, 1, + ) + + // Since we disabled proof couriers, we need to manually transfer the + // proof from the sender to the receiver now. We use the universe RPC + // InsertProof method to do this. + sendProofUniRPC(t, t.tapd, secondTapd, receiveAddr.ScriptKey, genInfo) + + // And now, the transfer should be completed on the receiver side too. + AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) +} + // addProofTestVectorFromFile adds a proof test vector by extracting it from the // proof file found at the given asset ID and script key. func addProofTestVectorFromFile(t *testing.T, testName string, diff --git a/itest/server_harness.go b/itest/server_harness.go deleted file mode 100644 index 6fee73485..000000000 --- a/itest/server_harness.go +++ /dev/null @@ -1,108 +0,0 @@ -package itest - -import ( - "fmt" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/lightningnetwork/lnd/cert" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -const ( - // DefaultAutogenValidity is the default validity of a self-signed - // certificate. The value corresponds to 14 months - // (14 months * 30 days * 24 hours). - DefaultAutogenValidity = 14 * 30 * 24 * time.Hour -) - -type universeServerMock struct { - // TODO: Embed Unimplemented*Server of universe RPCs here to mock them. 
-} - -type serverHarness struct { - serverHost string - mockServer *grpc.Server - - certFile string - server *universeServerMock - - errChan chan error - - wg sync.WaitGroup -} - -func newServerHarness(serverHost string) *serverHarness { - return &serverHarness{ - serverHost: serverHost, - errChan: make(chan error, 1), - } -} - -func (s *serverHarness) stop() { - s.mockServer.Stop() - s.wg.Wait() -} - -func (s *serverHarness) start() error { - tempDirName, err := os.MkdirTemp("", "tapitest") - if err != nil { - return err - } - - s.certFile = filepath.Join(tempDirName, "proxy.cert") - keyFile := filepath.Join(tempDirName, "proxy.key") - creds, err := genCertPair(s.certFile, keyFile) - if err != nil { - return err - } - - httpListener, err := net.Listen("tcp", s.serverHost) - if err != nil { - return err - } - - s.mockServer = grpc.NewServer(grpc.Creds(creds)) - s.server = &universeServerMock{} - - // TODO(guggero): Register universe RPC servers here. - - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.errChan <- s.mockServer.Serve(httpListener) - }() - - return nil -} - -// genCertPair generates a pair of private key and certificate and returns them -// in different formats needed to spin up test servers and clients. -func genCertPair(certFile, keyFile string) (credentials.TransportCredentials, - error) { - - certBytes, keyBytes, err := cert.GenCertPair( - "itest autogenerated cert", nil, nil, false, - DefaultAutogenValidity, - ) - if err != nil { - return nil, fmt.Errorf("unable to generate cert pair: %v", err) - } - - // Now that we have the certificate and key, we'll store them - // to the file system. - err = cert.WriteCertPair(certFile, keyFile, certBytes, keyBytes) - if err != nil { - return nil, fmt.Errorf("error writing cert pair: %w", err) - } - - creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) - if err != nil { - return nil, fmt.Errorf("unable to load cert file: %v", err) - } - return creds, nil -} diff --git a/itest/tapd_harness.go b/itest/tapd_harness.go index c73bb1fe4..05664927b 100644 --- a/itest/tapd_harness.go +++ b/itest/tapd_harness.go @@ -46,13 +46,37 @@ var ( tapdb.DefaultPostgresFixtureLifetime, "The amount of time to "+ "allow the postgres fixture to run in total. Needs "+ "to be increased for long-running tests.") + + // defaultHashmailBackoffConfig is the default backoff config we'll use + // for sending proofs with the hashmail courier. + defaultHashmailBackoffConfig = proof.BackoffCfg{ + BackoffResetWait: time.Second, + NumTries: 5, + InitialBackoff: 300 * time.Millisecond, + MaxBackoff: 600 * time.Millisecond, + } + + // defaultUniverseRpcBackoffConfig is the default backoff config we'll + // use for sending proofs with the universe RPC courier. + defaultUniverseRpcBackoffConfig = proof.BackoffCfg{ + SkipInitDelay: true, + BackoffResetWait: time.Second, + NumTries: 5, + InitialBackoff: 300 * time.Millisecond, + MaxBackoff: 600 * time.Millisecond, + } + + // defaultProofRetrievalDelay is the default delay we'll use for the + // custodian to wait from observing a transaction on-chan to retrieving + // the proof from the courier. + defaultProofRetrievalDelay = 200 * time.Millisecond ) const ( // defaultProofTransferReceiverAckTimeout is the default itest specific // timeout we'll use for waiting for a receiver to acknowledge a proof // transfer. 
- defaultProofTransferReceiverAckTimeout = 15 * time.Second + defaultProofTransferReceiverAckTimeout = 5 * time.Second ) // tapdHarness is a test harness that holds everything that is needed to @@ -86,6 +110,10 @@ type harnessOpts struct { proofCourier proof.CourierHarness custodianProofRetrievalDelay *time.Duration addrAssetSyncerDisable bool + + // fedSyncTickerInterval is the interval at which the federation envoy + // sync ticker will fire. + fedSyncTickerInterval *time.Duration } type harnessOption func(*harnessOpts) @@ -182,14 +210,11 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, // Populate proof courier specific config fields. // // Use passed in backoff config or default config. - backoffCfg := &proof.BackoffCfg{ - BackoffResetWait: 2 * time.Second, - NumTries: 3, - InitialBackoff: 2 * time.Second, - MaxBackoff: 2 * time.Second, - } + hashmailBackoffCfg := defaultHashmailBackoffConfig + universeRpcBackoffCfg := defaultUniverseRpcBackoffConfig if opts.proofSendBackoffCfg != nil { - backoffCfg = opts.proofSendBackoffCfg + hashmailBackoffCfg = *opts.proofSendBackoffCfg + universeRpcBackoffCfg = *opts.proofSendBackoffCfg } // Used passed in proof receiver ack timeout or default. @@ -198,12 +223,12 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, receiverAckTimeout = *opts.proofReceiverAckTimeout } - // TODO(ffranr): Disentangle the hashmail config from the universe RPC - // courier config. Right now, the universe courier takes the backoff - // config from the hashmail courier config. finalCfg.HashMailCourier = &proof.HashMailCourierCfg{ ReceiverAckTimeout: receiverAckTimeout, - BackoffCfg: backoffCfg, + BackoffCfg: &hashmailBackoffCfg, + } + finalCfg.UniverseRpcCourier = &proof.UniverseRpcCourierCfg{ + BackoffCfg: &universeRpcBackoffCfg, } switch typedProofCourier := (opts.proofCourier).(type) { @@ -213,7 +238,7 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, typedProofCourier.ListenAddr, ) - case *UniverseRPCHarness: + case *universeServerHarness: finalCfg.DefaultProofCourierAddr = fmt.Sprintf( "%s://%s", proof.UniverseRpcCourierType, typedProofCourier.ListenAddr, @@ -221,14 +246,21 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, default: finalCfg.DefaultProofCourierAddr = "" - finalCfg.HashMailCourier = nil } + ht.t.Logf("Using proof courier address: %v", + finalCfg.DefaultProofCourierAddr) + // Set the custodian proof retrieval delay if it was specified. + finalCfg.CustodianProofRetrievalDelay = defaultProofRetrievalDelay if opts.custodianProofRetrievalDelay != nil { finalCfg.CustodianProofRetrievalDelay = *opts.custodianProofRetrievalDelay } + if opts.fedSyncTickerInterval != nil { + finalCfg.Universe.SyncInterval = *opts.fedSyncTickerInterval + } + return &tapdHarness{ cfg: &cfg, clientCfg: finalCfg, diff --git a/itest/test_harness.go b/itest/test_harness.go index feed49d23..2614bbe82 100644 --- a/itest/test_harness.go +++ b/itest/test_harness.go @@ -95,7 +95,7 @@ type harnessTest struct { // nil if not yet set up. lndHarness *lntest.HarnessTest - universeServer *serverHarness + universeServer *universeServerHarness tapd *tapdHarness @@ -107,7 +107,7 @@ type harnessTest struct { // newHarnessTest creates a new instance of a harnessTest from a regular // testing.T instance. 
func (h *harnessTest) newHarnessTest(t *testing.T, net *lntest.HarnessTest, - universeServer *serverHarness, tapd *tapdHarness, + universeServer *universeServerHarness, tapd *tapdHarness, proofCourier proof.CourierHarness) *harnessTest { return &harnessTest{ @@ -174,7 +174,11 @@ func (h *harnessTest) LogfTimestamped(format string, args ...interface{}) { // shutdown stops both the mock universe and tapd server. func (h *harnessTest) shutdown(_ *testing.T) error { - h.universeServer.stop() + err := h.universeServer.Stop() + if err != nil { + return fmt.Errorf("unable to stop universe server harness: "+ + "%w", err) + } if h.proofCourier != nil { err := h.proofCourier.Stop() @@ -184,7 +188,7 @@ func (h *harnessTest) shutdown(_ *testing.T) error { } } - err := h.tapd.stop(!*noDelete) + err = h.tapd.stop(!*noDelete) if err != nil { return fmt.Errorf("unable to stop tapd: %v", err) } @@ -236,6 +240,25 @@ func (h *harnessTest) syncUniverseState(target, syncer *tapdHarness, require.Equal(h.t, numExpectedAssets, numAssets) } +// addFederationServer adds a new federation server to the given tapd harness. +func (h *harnessTest) addFederationServer(host string, target *tapdHarness) { + ctxt, cancel := context.WithTimeout( + context.Background(), defaultWaitTimeout, + ) + defer cancel() + + _, err := target.AddFederationServer( + ctxt, &unirpc.AddFederationServerRequest{ + Servers: []*unirpc.UniverseFederationServer{ + { + Host: host, + }, + }, + }, + ) + require.NoError(h.t, err) +} + // nextAvailablePort returns the first port that is available for listening by // a new node. It panics if no port is found and the maximum available TCP port // is reached. @@ -269,37 +292,34 @@ func nextAvailablePort() int { func setupHarnesses(t *testing.T, ht *harnessTest, lndHarness *lntest.HarnessTest, proofCourierType proof.CourierType) (*tapdHarness, - *serverHarness, proof.CourierHarness) { + *universeServerHarness, proof.CourierHarness) { + + universeServer := newUniverseServerHarness(t, ht, lndHarness.Bob) + + t.Logf("Starting universe server harness, listening on %v", + universeServer.ListenAddr) + + err := universeServer.Start(nil) + require.NoError(t, err, "universe server harness") // If a proof courier type is specified, start test specific proof // courier service and attach to test harness. var proofCourier proof.CourierHarness switch proofCourierType { - case proof.DisabledCourier: - // Proof courier disabled, do nothing. - case proof.HashmailCourierType: port := nextAvailablePort() - apHarness := NewApertureHarness(ht.t, port) - proofCourier = &apHarness + apertureHarness := NewApertureHarness(ht.t, port) + err := apertureHarness.Start(nil) + require.NoError(t, err, "aperture proof courier harness") - case proof.UniverseRpcCourierType: - proofCourier = NewUniverseRPCHarness(t, ht, lndHarness.Bob) - } + proofCourier = apertureHarness - // Start the proof courier harness if specified. - if proofCourier != nil { - err := proofCourier.Start(nil) - require.NoError(t, err, "unable to start proof courier harness") + // If nothing is specified, we use the universe RPC proof courier by + // default. + default: + proofCourier = universeServer } - mockServerAddr := fmt.Sprintf( - node.ListenerFormat, node.NextAvailablePort(), - ) - universeServer := newServerHarness(mockServerAddr) - err := universeServer.start() - require.NoError(t, err) - // Create a tapd that uses Bob and connect it to the universe server. 
tapdHarness := setupTapdHarness( t, ht, lndHarness.Alice, universeServer, @@ -345,6 +365,14 @@ type tapdHarnessParams struct { // startupSyncNumAssets is the number of assets that are expected to be // synced from the above node. startupSyncNumAssets int + + // fedSyncTickerInterval is the interval at which the federation envoy + // sync ticker will fire. + fedSyncTickerInterval *time.Duration + + // noDefaultUniverseSync indicates whether the default universe server + // should be added as a federation server or not. + noDefaultUniverseSync bool } type Option func(*tapdHarnessParams) @@ -352,7 +380,7 @@ type Option func(*tapdHarnessParams) // setupTapdHarness creates a new tapd that connects to the given lnd node // and to the given universe server. func setupTapdHarness(t *testing.T, ht *harnessTest, - node *node.HarnessNode, universe *serverHarness, + node *node.HarnessNode, universe *universeServerHarness, opts ...Option) *tapdHarness { // Set parameters by executing option functions. @@ -378,20 +406,25 @@ func setupTapdHarness(t *testing.T, ht *harnessTest, ho.proofCourier = selectedProofCourier ho.custodianProofRetrievalDelay = params.custodianProofRetrievalDelay ho.addrAssetSyncerDisable = params.addrAssetSyncerDisable + ho.fedSyncTickerInterval = params.fedSyncTickerInterval } - tapdHarness, err := newTapdHarness( - t, ht, tapdConfig{ - NetParams: harnessNetParams, - LndNode: node, - }, harnessOpts, - ) + tapdHarness, err := newTapdHarness(t, ht, tapdConfig{ + NetParams: harnessNetParams, + LndNode: node, + }, harnessOpts) require.NoError(t, err) // Start the tapd harness now. err = tapdHarness.start(params.expectErrExit) require.NoError(t, err) + // Add the default universe server as a federation server, unless + // specifically indicated by the caller. + if !params.noDefaultUniverseSync { + ht.addFederationServer(universe.service.rpcHost(), tapdHarness) + } + // Before we exit, we'll check to see if we need to sync the universe // state. 
if params.startupSyncNode != nil { diff --git a/itest/test_list_on_test.go b/itest/test_list_on_test.go index 841db91a1..00fbe7914 100644 --- a/itest/test_list_on_test.go +++ b/itest/test_list_on_test.go @@ -2,9 +2,7 @@ package itest -import ( - "github.com/lightninglabs/taproot-assets/proof" -) +import "github.com/lightninglabs/taproot-assets/proof" var testCases = []*testCase{ { @@ -48,14 +46,12 @@ var testCases = []*testCase{ proofCourierType: proof.HashmailCourierType, }, { - name: "basic send universerpc proof courier", - test: testBasicSendUnidirectional, - proofCourierType: proof.UniverseRpcCourierType, + name: "basic send unidirectional", + test: testBasicSendUnidirectional, }, { - name: "restart receiver check balance", - test: testRestartReceiverCheckBalance, - proofCourierType: proof.UniverseRpcCourierType, + name: "restart receiver check balance", + test: testRestartReceiverCheckBalance, }, { name: "resume pending package send", @@ -68,20 +64,22 @@ var testCases = []*testCase{ proofCourierType: proof.HashmailCourierType, }, { - name: "reattempt failed send uni courier", - test: testReattemptFailedSendUniCourier, - proofCourierType: proof.UniverseRpcCourierType, + name: "reattempt failed send uni courier", + test: testReattemptFailedSendUniCourier, }, { - name: "reattempt failed receive uni courier", - test: testReattemptFailedReceiveUniCourier, - proofCourierType: proof.UniverseRpcCourierType, + name: "reattempt failed receive uni courier", + test: testReattemptFailedReceiveUniCourier, }, { name: "offline receiver eventually receives", test: testOfflineReceiverEventuallyReceives, proofCourierType: proof.HashmailCourierType, }, + { + name: "addr send no proof courier with local universe import", + test: testSendNoCourierUniverseImport, + }, { name: "basic send passive asset", test: testBasicSendPassiveAsset, @@ -109,14 +107,12 @@ var testCases = []*testCase{ proofCourierType: proof.HashmailCourierType, }, { - name: "collectible send rpc courier", - test: testCollectibleSend, - proofCourierType: proof.UniverseRpcCourierType, + name: "collectible send", + test: testCollectibleSend, }, { - name: "collectible group send rpc courier", - test: testCollectibleGroupSend, - proofCourierType: proof.UniverseRpcCourierType, + name: "collectible group send", + test: testCollectibleGroupSend, }, { name: "re-issuance", @@ -183,6 +179,10 @@ var testCases = []*testCase{ name: "universe sync", test: testUniverseSync, }, + { + name: "universe sync manual insert", + test: testUniverseManualSync, + }, { name: "universe federation", test: testUniverseFederation, @@ -203,6 +203,10 @@ var testCases = []*testCase{ name: "universe pagination simple", test: testUniversePaginationSimple, }, + { + name: "mint proof repeat fed sync attempt", + test: testMintProofRepeatFedSyncAttempt, + }, } var optionalTestCases = []*testCase{ diff --git a/itest/universe_federation_test.go b/itest/universe_federation_test.go new file mode 100644 index 000000000..69d7dd712 --- /dev/null +++ b/itest/universe_federation_test.go @@ -0,0 +1,99 @@ +package itest + +import ( + "context" + "time" + + "github.com/lightninglabs/taproot-assets/taprpc/mintrpc" + unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc" + "github.com/stretchr/testify/require" +) + +// testMintProofRepeatFedSyncAttempt tests that the minting node will retry +// pushing the minting proofs to the federation server peer node, if the peer +// node is offline at the time of the initial sync attempt. 
+func testMintProofRepeatFedSyncAttempt(t *harnessTest) { + // Create a new minting node, without hooking it up to any existing + // Universe server. We will also set the sync ticker to 4 second, so + // that we can test that the proof push sync is retried and eventually + // succeeds after the fed server peer node reappears online. + syncTickerInterval := 4 * time.Second + mintingNode := setupTapdHarness( + t.t, t, t.lndHarness.Bob, nil, + func(params *tapdHarnessParams) { + params.fedSyncTickerInterval = &syncTickerInterval + params.noDefaultUniverseSync = true + }, + ) + defer func() { + require.NoError(t.t, mintingNode.stop(!*noDelete)) + }() + + // We'll use the main node as our federation universe server + // counterparty. + fedServerNode := t.tapd + + // Keep a reference to the fed server node RPC host address, so that we + // can assert that it has not changed after the restart. This is + // important, because the minting node will be retrying the proof push + // to this address. + fedServerNodeRpcHost := fedServerNode.rpcHost() + + // Register the fedServerNode as a federation universe server with the + // minting node. + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout) + defer cancel() + + _, err := mintingNode.AddFederationServer( + ctxt, &unirpc.AddFederationServerRequest{ + Servers: []*unirpc.UniverseFederationServer{ + { + Host: fedServerNodeRpcHost, + }, + }, + }, + ) + require.NoError(t.t, err) + + // Assert that the fed server node has not seen any asset proofs. + AssertUniverseStats(t.t, fedServerNode, 0, 0, 0) + + // Stop the federation server peer node, so that it does not receive the + // newly minted asset proofs immediately upon minting. + t.Logf("Stopping fed server tapd node") + require.NoError(t.t, fedServerNode.stop(false)) + + // Now that federation peer node is inactive, we'll mint some assets. + t.Logf("Minting assets on minting node") + rpcAssets := MintAssetsConfirmBatch( + t.t, t.lndHarness.Miner.Client, mintingNode, + []*mintrpc.MintAssetRequest{ + simpleAssets[0], issuableAssets[0], + }, + ) + require.Len(t.t, rpcAssets, 2) + + t.lndHarness.MineBlocks(7) + + // Wait for the minting node to attempt (and fail) to push the minting + // proofs to the fed peer node. We wait some multiple of the sync ticker + // interval to ensure that the minting node has had time to retry the + // proof push sync. + time.Sleep(syncTickerInterval * 2) + + // Start the federation server peer node. The federation envoy component + // of our minting node should currently be retrying the proof push sync + // with the federation peer at each tick. + t.Logf("Start (previously stopped) fed server tapd node") + err = fedServerNode.start(false) + require.NoError(t.t, err) + + // Ensure that the federation server node RPC host address has not + // changed after the restart. If it has, then the minting node will be + // retrying the proof push to the wrong address. + require.Equal(t.t, fedServerNodeRpcHost, fedServerNode.rpcHost()) + + t.Logf("Assert that fed peer node has seen the asset minting proofs") + AssertUniverseStats(t.t, fedServerNode, 2, 2, 1) +} diff --git a/itest/universe_pagination_test.go b/itest/universe_pagination_test.go index 2a639f938..9ef4c1065 100644 --- a/itest/universe_pagination_test.go +++ b/itest/universe_pagination_test.go @@ -36,7 +36,7 @@ func testUniversePaginationSimple(t *harnessTest) { // If we create a second tapd instance and sync the universe state, // the synced tree should match the source tree. 
bob := setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) diff --git a/itest/universe_server_harness.go b/itest/universe_server_harness.go new file mode 100644 index 000000000..b461d74da --- /dev/null +++ b/itest/universe_server_harness.go @@ -0,0 +1,48 @@ +package itest + +import ( + "testing" + + "github.com/lightninglabs/taproot-assets/proof" + "github.com/lightningnetwork/lnd/lntest/node" + "github.com/stretchr/testify/require" +) + +type universeServerHarness struct { + // service is the instance of the universe tap service. + service *tapdHarness + + // ListenAddr is the address that the service is listening on. + ListenAddr string +} + +func newUniverseServerHarness(t *testing.T, ht *harnessTest, + lndHarness *node.HarnessNode) *universeServerHarness { + + service, err := newTapdHarness(t, ht, tapdConfig{ + NetParams: harnessNetParams, + LndNode: lndHarness, + }) + require.NoError(t, err) + + return &universeServerHarness{ + service: service, + ListenAddr: service.rpcHost(), + } +} + +// Start starts the service. +func (h *universeServerHarness) Start(_ chan error) error { + return h.service.start(false) +} + +// Stop stops the service. +func (h *universeServerHarness) Stop() error { + // Don't delete temporary data on stop. This will allow us to cleanly + // restart the service mid-test. + return h.service.stop(false) +} + +// Ensure that universeServerHarness implements the proof.CourierHarness +// interface. +var _ proof.CourierHarness = (*universeServerHarness)(nil) diff --git a/itest/universe_test.go b/itest/universe_test.go index f4340aeab..55795b201 100644 --- a/itest/universe_test.go +++ b/itest/universe_test.go @@ -11,17 +11,15 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2/schnorr" - "github.com/lightninglabs/taproot-assets/internal/test" - tap "github.com/lightninglabs/taproot-assets" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/internal/test" "github.com/lightninglabs/taproot-assets/mssmt" "github.com/lightninglabs/taproot-assets/taprpc" "github.com/lightninglabs/taproot-assets/taprpc/mintrpc" unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc" "github.com/lightninglabs/taproot-assets/universe" - "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -44,7 +42,10 @@ func testUniverseSync(t *harnessTest) { // With those assets created, we'll now create a new node that we'll // use to exercise the Universe sync. bob := setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true + }, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) @@ -262,6 +263,88 @@ func testUniverseSync(t *harnessTest) { ) } +// testUniverseManualSync tests that we're able to insert proofs manually into +// a universe instead of using a full sync. +func testUniverseManualSync(t *harnessTest) { + miner := t.lndHarness.Miner.Client + + // First, we'll create out usual set of issuable assets. + rpcIssuableAssets := MintAssetsConfirmBatch( + t.t, miner, t.tapd, issuableAssets, + ) + + // With those assets created, we'll now create a new node that we'll + // use to exercise the manual Universe sync. 
+ bob := setupTapdHarness( + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true + }, + ) + defer func() { + require.NoError(t.t, bob.stop(!*noDelete)) + }() + + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout) + defer cancel() + + // We now side load the issuance proof of our first asset into Bob's + // universe. + firstAsset := rpcIssuableAssets[0] + firstAssetGen := firstAsset.AssetGenesis + sendProofUniRPC(t, t.tapd, bob, firstAsset.ScriptKey, firstAssetGen) + + // We should also be able to fetch an asset from Bob's Universe, and + // query for that asset with the compressed script key. + firstOutpoint, err := tap.UnmarshalOutpoint( + firstAsset.ChainAnchor.AnchorOutpoint, + ) + require.NoError(t.t, err) + + firstAssetProofQuery := unirpc.UniverseKey{ + Id: &unirpc.ID{ + Id: &unirpc.ID_GroupKey{ + GroupKey: firstAsset.AssetGroup.TweakedGroupKey, + }, + ProofType: unirpc.ProofType_PROOF_TYPE_ISSUANCE, + }, + LeafKey: &unirpc.AssetKey{ + Outpoint: &unirpc.AssetKey_Op{ + Op: &unirpc.Outpoint{ + HashStr: firstOutpoint.Hash.String(), + Index: int32(firstOutpoint.Index), + }, + }, + ScriptKey: &unirpc.AssetKey_ScriptKeyBytes{ + ScriptKeyBytes: firstAsset.ScriptKey, + }, + }, + } + + // We should now be able to query for the asset proof. + _, err = bob.QueryProof(ctxt, &firstAssetProofQuery) + require.NoError(t.t, err) + + // We should now also be able to fetch the meta data and group key for + // the asset. + metaData, err := bob.FetchAssetMeta(ctxt, &taprpc.FetchAssetMetaRequest{ + Asset: &taprpc.FetchAssetMetaRequest_MetaHash{ + MetaHash: firstAssetGen.MetaHash, + }, + }) + require.NoError(t.t, err) + require.Equal(t.t, firstAssetGen.MetaHash, metaData.MetaHash) + + // We should be able to create a new address for the asset, since that + // requires us to know the full genesis and group key. + _, err = bob.NewAddr(ctxt, &taprpc.NewAddrRequest{ + AssetId: firstAssetGen.AssetId, + Amt: 500, + }) + require.NoError(t.t, err) +} + // unmarshalMerkleSumNode un-marshals a protobuf MerkleSumNode. func unmarshalMerkleSumNode(root *unirpc.MerkleSumNode) mssmt.Node { var nodeHash mssmt.NodeHash @@ -387,7 +470,10 @@ func testUniverseFederation(t *harnessTest) { // We'll kick off the test by making a new node, without hooking it up to // any existing Universe server. bob := setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true + }, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) diff --git a/itest/universerpc_harness.go b/itest/universerpc_harness.go deleted file mode 100644 index 4f0073fc2..000000000 --- a/itest/universerpc_harness.go +++ /dev/null @@ -1,53 +0,0 @@ -package itest - -import ( - "testing" - - "github.com/lightninglabs/taproot-assets/proof" - "github.com/lightningnetwork/lnd/lntest/node" - "github.com/stretchr/testify/require" -) - -// UniverseRPCHarness is an integration testing harness for the universe tap -// service. -type UniverseRPCHarness struct { - // service is the instance of the universe tap service. - service *tapdHarness - - // ListenAddr is the address that the service is listening on. - ListenAddr string -} - -// NewUniverseRPCHarness creates a new test harness for a universe tap service. 
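
The new universeServerHarness added above replaces the UniverseRPCHarness being deleted here; apart from the rename, the behaviour is unchanged. A minimal sketch of how a test in this package could stand one up and tear it down again (the helper name and the use of t.Cleanup are illustrative, not taken from the diff):

func startUniServer(t *testing.T, ht *harnessTest,
	lndNode *node.HarnessNode) *universeServerHarness {

	uniServer := newUniverseServerHarness(t, ht, lndNode)

	// Start ignores its error channel argument, so nil is fine here.
	require.NoError(t, uniServer.Start(nil))

	// Stop keeps the on-disk state around, which allows a clean
	// mid-test restart if needed.
	t.Cleanup(func() {
		require.NoError(t, uniServer.Stop())
	})

	return uniServer
}
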
-func NewUniverseRPCHarness(t *testing.T, ht *harnessTest, - lndHarness *node.HarnessNode) *UniverseRPCHarness { - - service, err := newTapdHarness( - t, ht, tapdConfig{ - NetParams: harnessNetParams, - LndNode: lndHarness, - }, - ) - require.NoError(t, err) - - return &UniverseRPCHarness{ - service: service, - ListenAddr: service.rpcHost(), - } -} - -// Start starts the service. -func (h *UniverseRPCHarness) Start(_ chan error) error { - return h.service.start(false) -} - -// Stop stops the service. -func (h *UniverseRPCHarness) Stop() error { - // Don't delete temporary data on stop. This will allow us to cleanly - // restart the service mid-test. - return h.service.stop(false) -} - -// Ensure that NewUniverseRPCHarness implements the proof.CourierHarness -// interface. -var _ proof.CourierHarness = (*UniverseRPCHarness)(nil) diff --git a/monitoring/asset_balances_collector.go b/monitoring/asset_balances_collector.go new file mode 100644 index 000000000..462711c6d --- /dev/null +++ b/monitoring/asset_balances_collector.go @@ -0,0 +1,114 @@ +package monitoring + +import ( + "context" + "errors" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +// assetBalancesCollector is a Prometheus collector that exports the balances +// of all taproot assets. +type assetBalancesCollector struct { + collectMx sync.Mutex + + cfg *PrometheusConfig + registry *prometheus.Registry + + balancesVec *prometheus.GaugeVec + + utxosVec *prometheus.GaugeVec +} + +func newAssetBalancesCollector(cfg *PrometheusConfig, + registry *prometheus.Registry) (*assetBalancesCollector, error) { + + if cfg == nil { + return nil, errors.New("asset collector prometheus cfg is nil") + } + + if cfg.AssetStore == nil { + return nil, errors.New("asset collector asset store is nil") + } + + return &assetBalancesCollector{ + cfg: cfg, + registry: registry, + balancesVec: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "asset_balances", + Help: "Balances of all taproot assets", + }, + []string{"asset_name"}, + ), + utxosVec: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "utxos_assets_held", + Help: "Number of UTXOs used for taproot assets", + }, + []string{"outpoint"}, + ), + }, nil +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once the +// last descriptor has been sent. +// +// NOTE: Part of the prometheus.Collector interface. +func (a *assetBalancesCollector) Describe(ch chan<- *prometheus.Desc) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + a.balancesVec.Describe(ch) + a.utxosVec.Describe(ch) +} + +// Collect is called by the Prometheus registry when collecting metrics. +// +// NOTE: Part of the prometheus.Collector interface. 
+func (a *assetBalancesCollector) Collect(ch chan<- prometheus.Metric) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + ctxdb, cancel := context.WithTimeout(context.Background(), dbTimeout) + defer cancel() + + assets, err := a.cfg.AssetStore.FetchAllAssets(ctxdb, false, false, nil) + if err != nil { + log.Errorf("unable to fetch assets: %v", err) + return + } + + utxos, err := a.cfg.AssetStore.FetchManagedUTXOs(ctxdb) + if err != nil { + log.Errorf("unable to fetch utxos: %v", err) + return + } + + a.utxosVec.Reset() + a.balancesVec.Reset() + + utxoMap := make(map[string]prometheus.Gauge) + + for _, utxo := range utxos { + utxoOutpoint := utxo.OutPoint.String() + utxoMap[utxoOutpoint] = a.utxosVec.WithLabelValues(utxoOutpoint) + } + + for _, asset := range assets { + a.balancesVec.WithLabelValues(asset.Tag). + Set(float64(asset.Amount)) + + utxoGauge, ok := utxoMap[asset.AnchorOutpoint.String()] + if !ok { + continue + } + + utxoGauge.Inc() + } + + a.balancesVec.Collect(ch) + a.utxosVec.Collect(ch) +} diff --git a/monitoring/asset_collector.go b/monitoring/asset_collector.go new file mode 100644 index 000000000..83e608746 --- /dev/null +++ b/monitoring/asset_collector.go @@ -0,0 +1,128 @@ +package monitoring + +import ( + "context" + "errors" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + numAssetsMintedMetric = "num_assets_minted" + + numTotalGroupsMetric = "num_total_groups" + + numTotalSyncsMetric = "num_total_syncs" + + numTotalProofsMetric = "num_total_proofs" +) + +// universeStatsCollector is a Prometheus collector that exports the stats of +// the universe. +type universeStatsCollector struct { + collectMx sync.Mutex + + cfg *PrometheusConfig + registry *prometheus.Registry + + gauges map[string]prometheus.Gauge +} + +func newUniverseStatsCollector(cfg *PrometheusConfig, + registry *prometheus.Registry) (*universeStatsCollector, error) { + + if cfg == nil { + return nil, errors.New("universe stats collector prometheus " + + "cfg is nil") + } + + if cfg.UniverseStats == nil { + return nil, errors.New("universe stats collector universe " + + "stats is nil") + } + + gaugesMap := map[string]prometheus.Gauge{ + numAssetsMintedMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numAssetsMintedMetric, + Help: "Total number of assets minted", + }, + ), + numTotalGroupsMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numTotalGroupsMetric, + Help: "Total number of groups", + }, + ), + numTotalSyncsMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numTotalSyncsMetric, + Help: "Total number of syncs", + }, + ), + numTotalProofsMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numTotalProofsMetric, + Help: "Total number of proofs", + }, + ), + } + + return &universeStatsCollector{ + cfg: cfg, + registry: registry, + gauges: gaugesMap, + }, nil +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once the +// last descriptor has been sent. +// +// NOTE: Part of the prometheus.Collector interface. +func (a *universeStatsCollector) Describe(ch chan<- *prometheus.Desc) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + for _, gauge := range a.gauges { + gauge.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting metrics. +// +// NOTE: Part of the prometheus.Collector interface. 
+func (a *universeStatsCollector) Collect(ch chan<- prometheus.Metric) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + ctx, cancel := context.WithTimeout(context.Background(), dbTimeout) + defer cancel() + + universeStats, err := a.cfg.UniverseStats.AggregateSyncStats(ctx) + if err != nil { + log.Errorf("unable to get aggregate universe stats: %v", err) + return + } + + a.gauges[numAssetsMintedMetric].Set( + float64(universeStats.NumTotalAssets), + ) + + a.gauges[numTotalGroupsMetric].Set( + float64(universeStats.NumTotalGroups), + ) + + a.gauges[numTotalSyncsMetric].Set( + float64(universeStats.NumTotalSyncs), + ) + + a.gauges[numTotalProofsMetric].Set( + float64(universeStats.NumTotalProofs), + ) + + for _, gauge := range a.gauges { + gauge.Collect(ch) + } +} diff --git a/monitoring/config.go b/monitoring/config.go index d7f7c0b77..53dd37381 100644 --- a/monitoring/config.go +++ b/monitoring/config.go @@ -1,6 +1,11 @@ package monitoring -import "google.golang.org/grpc" +import ( + "github.com/lightninglabs/taproot-assets/tapdb" + "github.com/lightninglabs/taproot-assets/tapgarden" + "github.com/lightninglabs/taproot-assets/universe" + "google.golang.org/grpc" +) // PrometheusConfig is the set of configuration data that specifies if // Prometheus metric exporting is activated, and if so the listening address of @@ -17,6 +22,18 @@ type PrometheusConfig struct { // generic RPC metrics to monitor the health of the service. RPCServer *grpc.Server + // UniverseStats is used to collect any stats that are relevant to the + // universe. + UniverseStats universe.Telemetry + + // AssetStore is used to collect any stats that are relevant to the + // asset store. + AssetStore *tapdb.AssetStore + + // AssetMinter is used to collect any stats that are relevant to the + // asset minter. + AssetMinter tapgarden.Planter + // PerfHistograms indicates if the additional histogram information for // latency, and handling time of gRPC calls should be enabled. This // generates additional data, and consume more memory for the diff --git a/monitoring/garden_collector.go b/monitoring/garden_collector.go new file mode 100644 index 000000000..c37e96e83 --- /dev/null +++ b/monitoring/garden_collector.go @@ -0,0 +1,120 @@ +package monitoring + +import ( + "errors" + "sync" + + "github.com/lightninglabs/taproot-assets/tapgarden" + "github.com/prometheus/client_golang/prometheus" +) + +// assetBalancesCollector is a Prometheus collector that exports the balances +// of all taproot assets. 
+type gardenCollector struct { + collectMx sync.Mutex + + cfg *PrometheusConfig + registry *prometheus.Registry + + pendingBatches *prometheus.GaugeVec + completedBatches prometheus.Gauge +} + +func newGardenCollector(cfg *PrometheusConfig, + registry *prometheus.Registry) (*gardenCollector, error) { + + if cfg == nil { + return nil, errors.New("garden collector prometheus cfg is nil") + } + + if cfg.AssetStore == nil { + return nil, errors.New("garden collector asset store is nil") + } + + return &gardenCollector{ + cfg: cfg, + registry: registry, + pendingBatches: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "mint_batches", + Help: "Batched mint transactions", + }, + []string{"batch_pubkey"}, + ), + completedBatches: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "completed_batches", + Help: "Total number of completed mint batches", + }, + ), + }, nil +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once the +// last descriptor has been sent. +// +// NOTE: Part of the prometheus.Collector interface. +func (a *gardenCollector) Describe(ch chan<- *prometheus.Desc) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + a.pendingBatches.Describe(ch) + a.completedBatches.Describe(ch) +} + +// Collect is called by the Prometheus registry when collecting metrics. +// +// NOTE: Part of the prometheus.Collector interface. +func (a *gardenCollector) Collect(ch chan<- prometheus.Metric) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + a.completedBatches.Set(0) + + // Get the number of pending batches. + batches, err := a.cfg.AssetMinter.ListBatches(nil) + if err != nil { + log.Errorf("unable to list batches: %v", err) + return + } + + completed := 0 + + for _, batch := range batches { + state := batch.State() + + switch { + case state == tapgarden.BatchStatePending || + state == tapgarden.BatchStateFrozen || + state == tapgarden.BatchStateCommitted || + state == tapgarden.BatchStateBroadcast || + state == tapgarden.BatchStateConfirmed: + + if state == tapgarden.BatchStatePending { + a.pendingBatches.WithLabelValues( + batch.BatchKey.PubKey.X().String(), + ).Set( + float64(len(batch.Seedlings)), + ) + } + + case state == tapgarden.BatchStateFinalized || + state == tapgarden.BatchStateSeedlingCancelled || + state == tapgarden.BatchStateSproutCancelled: + + a.pendingBatches.DeleteLabelValues( + batch.BatchKey.PubKey.X().String(), + ) + + if state == tapgarden.BatchStateFinalized { + completed += 1 + } + } + } + + a.completedBatches.Set(float64(completed)) + + a.pendingBatches.Collect(ch) + a.completedBatches.Collect(ch) +} diff --git a/monitoring/interface.go b/monitoring/interface.go deleted file mode 100644 index 0b9151315..000000000 --- a/monitoring/interface.go +++ /dev/null @@ -1,29 +0,0 @@ -package monitoring - -import "github.com/prometheus/client_golang/prometheus" - -// metricGroupFactory is a factory method that given the primary prometheus -// config, will create a new MetricGroup that will be managed by the main -// PrometheusExporter. -type metricGroupFactory func(*PrometheusConfig) (MetricGroup, error) - -// MetricGroup is the primary interface of this package. The main exporter (in -// this case the PrometheusExporter), will manage these directly, ensuring that -// all MetricGroups are registered before the main prometheus exporter starts -// and any additional tracing is added. 
-type MetricGroup interface { - // Collector is the embedded interface that forces every MetricGroup to - // also be a collector. - prometheus.Collector - - // Name is the name of the metric group. When exported to prometheus, - // it's expected that all metric under this group have the same prefix. - Name() string - - // RegisterMetricFuncs signals to the underlying hybrid collector that - // it should register all metrics that it aims to export with the - // global Prometheus registry. Rather than using the series of - // "MustRegister" directives, implementers of this interface should - // instead propagate back any errors related to metric registration. - RegisterMetricFuncs() error -} diff --git a/monitoring/prometheus.go b/monitoring/prometheus.go index c19dd647b..b352e65f7 100644 --- a/monitoring/prometheus.go +++ b/monitoring/prometheus.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "net/http" - "sync" "time" //nolint:lll @@ -15,35 +14,29 @@ import ( ) var ( - // metricGroups is a global variable of all registered metrics - // projected by the mutex below. All new MetricGroups should add - // themselves to this map within the init() method of their file. - metricGroups = make(map[string]metricGroupFactory) - - // activeGroups is a global map of all active metric groups. This can - // be used by some of the "static' package level methods to look up the - // target metric group to export observations. - activeGroups = make(map[string]MetricGroup) - - // metricsMtx is a global mutex that should be held when accessing the - // global maps. - metricsMtx sync.Mutex - // serverMetrics is a global variable that holds the Prometheus metrics // for the gRPC server. serverMetrics *grpc_prometheus.ServerMetrics ) +const ( + // dbTimeout is the default database timeout. + dbTimeout = 20 * time.Second +) + // PrometheusExporter is a metric exporter that uses Prometheus directly. The // internal server will interact with this struct in order to export relevant // metrics. type PrometheusExporter struct { - config *PrometheusConfig + config *PrometheusConfig + registry *prometheus.Registry } // Start registers all relevant metrics with the Prometheus library, then // launches the HTTP server that Prometheus will hit to scrape our metrics. func (p *PrometheusExporter) Start() error { + log.Infof("Starting Prometheus Exporter") + // If we're not active, then there's nothing more to do. if !p.config.Active { return nil @@ -54,28 +47,43 @@ func (p *PrometheusExporter) Start() error { return fmt.Errorf("server metrics not set") } - reg := prometheus.NewRegistry() - reg.MustRegister(collectors.NewProcessCollector( + // Create a custom Prometheus registry. + p.registry = prometheus.NewRegistry() + p.registry.MustRegister(collectors.NewProcessCollector( collectors.ProcessCollectorOpts{}, )) - reg.MustRegister(collectors.NewGoCollector()) - reg.MustRegister(serverMetrics) + p.registry.MustRegister(collectors.NewGoCollector()) + p.registry.MustRegister(serverMetrics) - // Make ensure that all metrics exist when collecting and querying. - serverMetrics.InitializeMetrics(p.config.RPCServer) + uniStatsCollector, err := newUniverseStatsCollector(p.config, p.registry) + if err != nil { + return err + } + p.registry.MustRegister(uniStatsCollector) - // Next, we'll attempt to register all our metrics. If we fail to - // register ANY metric, then we'll fail all together. 
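
With the MetricGroup indirection removed, the exporter reduces to the plain custom-registry pattern: build a prometheus.Registry, register each collector explicitly, and serve it over HTTP (as the hunk continues below). A condensed sketch of that pattern, with an illustrative listen address and collector variable:

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewGoCollector())
reg.MustRegister(assetBalancesCollector) // e.g. the collector added above

mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
	EnableOpenMetrics: true,
}))

go func() {
	// Illustrative listen address; tapd takes this from its config.
	_ = http.ListenAndServe("127.0.0.1:8989", mux)
}()
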
- if err := p.registerMetrics(); err != nil { + assetBalancesCollecor, err := + newAssetBalancesCollector(p.config, p.registry) + if err != nil { return err } + p.registry.MustRegister(assetBalancesCollecor) + + gardenCollector, err := newGardenCollector(p.config, p.registry) + if err != nil { + return err + } + p.registry.MustRegister(gardenCollector) + + // Make ensure that all metrics exist when collecting and querying. + serverMetrics.InitializeMetrics(p.config.RPCServer) // Finally, we'll launch the HTTP server that Prometheus will use to - // scape our metrics. + // scrape our metrics. go func() { + // Use our custom prometheus registry. promMux := http.NewServeMux() promMux.Handle("/metrics", promhttp.HandlerFor( - reg, promhttp.HandlerOpts{ + p.registry, promhttp.HandlerOpts{ EnableOpenMetrics: true, MaxRequestsInFlight: 1, }), @@ -98,61 +106,3 @@ func (p *PrometheusExporter) Start() error { return nil } - -// registerMetrics iterates through all the registered metric groups and -// attempts to register each one. If any of the MetricGroups fail to register, -// then an error will be returned. -func (p *PrometheusExporter) registerMetrics() error { - metricsMtx.Lock() - defer metricsMtx.Unlock() - - for _, metricGroupFunc := range metricGroups { - metricGroup, err := metricGroupFunc(p.config) - if err != nil { - return err - } - - if err := metricGroup.RegisterMetricFuncs(); err != nil { - return err - } - - activeGroups[metricGroup.Name()] = metricGroup - } - - return nil -} - -// gauges is a map type that maps a gauge to its unique name. -type gauges map[string]*prometheus.GaugeVec // nolint:unused - -// addGauge adds a new gauge vector to the map. -func (g gauges) addGauge(name, help string, labels []string) { // nolint:unused - g[name] = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: name, - Help: help, - }, - labels, - ) -} - -// describe describes all gauges contained in the map to the given channel. -func (g gauges) describe(ch chan<- *prometheus.Desc) { // nolint:unused - for _, gauge := range g { - gauge.Describe(ch) - } -} - -// collect collects all metrics of the map's gauges to the given channel. -func (g gauges) collect(ch chan<- prometheus.Metric) { // nolint:unused - for _, gauge := range g { - gauge.Collect(ch) - } -} - -// reset resets all gauges in the map. -func (g gauges) reset() { // nolint:unused - for _, gauge := range g { - gauge.Reset() - } -} diff --git a/proof/archive.go b/proof/archive.go index 9dd38f472..9530a616f 100644 --- a/proof/archive.go +++ b/proof/archive.go @@ -107,6 +107,11 @@ type Archiver interface { // returned. FetchProof(ctx context.Context, id Locator) (Blob, error) + // HasProof returns true if the proof for the given locator exists. This + // is intended to be a performance optimized lookup compared to fetching + // a proof and checking for ErrProofNotFound. + HasProof(ctx context.Context, id Locator) (bool, error) + // FetchProofs fetches all proofs for assets uniquely identified by the // passed asset ID. FetchProofs(ctx context.Context, id asset.ID) ([]*AnnotatedProof, error) @@ -125,11 +130,103 @@ type Archiver interface { // NotifyArchiver is an Archiver that also allows callers to subscribe to // notifications about new proofs being added to the archiver. type NotifyArchiver interface { - Archiver + // FetchProof fetches a proof for an asset uniquely identified by the + // passed Identifier. The returned blob is expected to be the encoded + // full proof file, containing the complete provenance of the asset. 
+ // + // If a proof cannot be found, then ErrProofNotFound should be returned. + FetchProof(ctx context.Context, id Locator) (Blob, error) fn.EventPublisher[Blob, []*Locator] } +// MultiArchiveNotifier is a NotifyArchiver that wraps several other archives +// and notifies subscribers about new proofs that are added to any of the +// archives. +type MultiArchiveNotifier struct { + archives []NotifyArchiver +} + +// NewMultiArchiveNotifier creates a new MultiArchiveNotifier based on the set +// of specified backends. +func NewMultiArchiveNotifier(archives ...NotifyArchiver) *MultiArchiveNotifier { + return &MultiArchiveNotifier{ + archives: archives, + } +} + +// FetchProof fetches a proof for an asset uniquely identified by the passed +// Identifier. The returned proof can either be a full proof file or just a +// single proof. +// +// If a proof cannot be found, then ErrProofNotFound should be returned. +// +// NOTE: This is part of the NotifyArchiver interface. +func (m *MultiArchiveNotifier) FetchProof(ctx context.Context, + id Locator) (Blob, error) { + + for idx := range m.archives { + a := m.archives[idx] + + proofBlob, err := a.FetchProof(ctx, id) + if errors.Is(err, ErrProofNotFound) { + // Try the next archive. + continue + } else if err != nil { + return nil, fmt.Errorf("error fetching proof "+ + "from archive: %w", err) + } + + return proofBlob, nil + } + + return nil, ErrProofNotFound +} + +// RegisterSubscriber adds a new subscriber for receiving events. The +// registration request is forwarded to all registered archives. +func (m *MultiArchiveNotifier) RegisterSubscriber( + receiver *fn.EventReceiver[Blob], deliverExisting bool, + deliverFrom []*Locator) error { + + for idx := range m.archives { + a := m.archives[idx] + + err := a.RegisterSubscriber( + receiver, deliverExisting, deliverFrom, + ) + if err != nil { + return fmt.Errorf("error registering subscriber: %w", + err) + } + } + + return nil +} + +// RemoveSubscriber removes the given subscriber and also stops it from +// processing events. The removal request is forwarded to all registered +// archives. +func (m *MultiArchiveNotifier) RemoveSubscriber( + subscriber *fn.EventReceiver[Blob]) error { + + for idx := range m.archives { + a := m.archives[idx] + + err := a.RemoveSubscriber(subscriber) + if err != nil { + return fmt.Errorf("error removing subscriber: "+ + "%w", err) + } + } + + return nil +} + +// A compile-time interface to ensure MultiArchiveNotifier meets the +// NotifyArchiver interface. +var _ NotifyArchiver = (*MultiArchiveNotifier)(nil) + // FileArchiver implements proof Archiver backed by an on-disk file system. The // archiver takes a single root directory then creates the following overlap // mapping: @@ -216,6 +313,22 @@ func (f *FileArchiver) FetchProof(_ context.Context, id Locator) (Blob, error) { return proofFile, nil } +// HasProof returns true if the proof for the given locator exists. This is +// intended to be a performance optimized lookup compared to fetching a proof +// and checking for ErrProofNotFound. +func (f *FileArchiver) HasProof(_ context.Context, id Locator) (bool, error) { + // All our on-disk storage is based on asset IDs, so to look up a path, + // we just need to compute the full file path and see if it exists on + // disk. 
+ proofPath, err := genProofFilePath(f.proofPath, id) + if err != nil { + return false, fmt.Errorf("unable to make proof file path: %w", + err) + } + + return lnrpc.FileExists(proofPath), nil +} + // FetchProofs fetches all proofs for assets uniquely identified by the passed // asset ID. func (f *FileArchiver) FetchProofs(_ context.Context, @@ -407,6 +520,27 @@ func (m *MultiArchiver) FetchProof(ctx context.Context, return nil, ErrProofNotFound } +// HasProof returns true if the proof for the given locator exists. This is +// intended to be a performance optimized lookup compared to fetching a proof +// and checking for ErrProofNotFound. The multi archiver only considers a proof +// to be present if all backends have it. +func (m *MultiArchiver) HasProof(ctx context.Context, id Locator) (bool, error) { + for _, archive := range m.backends { + ok, err := archive.HasProof(ctx, id) + if err != nil { + return false, err + } + + // We are expecting all backends to have the proof, otherwise we + // consider the proof not to be found. + if !ok { + return false, nil + } + } + + return true, nil +} + // FetchProofs fetches all proofs for assets uniquely identified by the passed // asset ID. func (m *MultiArchiver) FetchProofs(ctx context.Context, diff --git a/proof/courier.go b/proof/courier.go index ae323ee08..c3586751f 100644 --- a/proof/courier.go +++ b/proof/courier.go @@ -11,6 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" + "github.com/davecgh/go-spew/spew" "github.com/lightninglabs/lightning-node-connect/hashmailrpc" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" @@ -66,207 +67,153 @@ type Courier interface { // SetSubscribers sets the set of subscribers that will be notified // of proof courier related events. SetSubscribers(map[uint64]*fn.EventReceiver[fn.Event]) -} - -// CourierAddr is a fully validated courier address (including protocol specific -// validation). -type CourierAddr interface { - // Url returns the url.URL representation of the courier address. - Url() *url.URL - // NewCourier generates a new courier service handle. - NewCourier(ctx context.Context, cfg *CourierCfg, - recipient Recipient) (Courier, error) + // Close stops the courier instance. + Close() error } -// ParseCourierAddrString parses a proof courier address string and returns a -// protocol specific courier address instance. -func ParseCourierAddrString(addr string) (CourierAddr, error) { - // Parse URI. - urlAddr, err := url.ParseRequestURI(addr) - if err != nil { - return nil, fmt.Errorf("invalid proof courier URI address: %w", - err) - } +// CourierCfg contains general config parameters applicable to all proof +// couriers. +type CourierCfg struct { + // HashMailCfg contains hashmail protocol specific config parameters. + HashMailCfg *HashMailCourierCfg - return ParseCourierAddrUrl(*urlAddr) -} + // UniverseRpcCfg contains universe RPC protocol specific config + // parameters. + UniverseRpcCfg *UniverseRpcCourierCfg -// ParseCourierAddrUrl parses a proof courier address url.URL and returns a -// protocol specific courier address instance. -func ParseCourierAddrUrl(addr url.URL) (CourierAddr, error) { - // Create new courier addr based on URL scheme. - switch addr.Scheme { - case HashmailCourierType: - return NewHashMailCourierAddr(addr) - case UniverseRpcCourierType: - return NewUniverseRpcCourierAddr(addr) - } + // TransferLog is a log for recording proof delivery and retrieval + // attempts. 
+ TransferLog TransferLog +} - return nil, fmt.Errorf("unknown courier address protocol "+ - "(consider updating tapd): %v", addr.Scheme) +// CourierDispatch is an interface that abstracts away the different proof +// courier services that are supported. +type CourierDispatch interface { + // NewCourier instantiates a new courier service handle given a service + // URL address. + NewCourier(addr *url.URL, recipient Recipient) (Courier, error) } -// HashMailCourierAddr is a hashmail protocol specific implementation of the -// CourierAddr interface. -type HashMailCourierAddr struct { - addr url.URL +// URLDispatch is a proof courier dispatch that uses the courier address URL +// scheme to determine which courier service to use. +type URLDispatch struct { + cfg *CourierCfg } -// Url returns the url.URL representation of the hashmail courier address. -func (h *HashMailCourierAddr) Url() *url.URL { - return &h.addr +// NewCourierDispatch creates a new proof courier dispatch. +func NewCourierDispatch(cfg *CourierCfg) *URLDispatch { + return &URLDispatch{ + cfg: cfg, + } } -// NewCourier generates a new courier service handle. -func (h *HashMailCourierAddr) NewCourier(_ context.Context, cfg *CourierCfg, +// NewCourier instantiates a new courier service handle given a service URL +// address. +func (u *URLDispatch) NewCourier(addr *url.URL, recipient Recipient) (Courier, error) { - backoffHandle := NewBackoffHandler(cfg.BackoffCfg, cfg.TransferLog) + subscribers := make(map[uint64]*fn.EventReceiver[fn.Event]) - hashMailCfg := HashMailCourierCfg{ - ReceiverAckTimeout: cfg.ReceiverAckTimeout, - } + // Create new courier addr based on URL scheme. + switch addr.Scheme { + case HashmailCourierType: + cfg := u.cfg.HashMailCfg + backoffHandler := NewBackoffHandler( + cfg.BackoffCfg, u.cfg.TransferLog, + ) - hashMailBox, err := NewHashMailBox(&h.addr) - if err != nil { - return nil, fmt.Errorf("unable to make mailbox: %v", - err) - } + hashMailCfg := HashMailCourierCfg{ + ReceiverAckTimeout: cfg.ReceiverAckTimeout, + } - subscribers := make( - map[uint64]*fn.EventReceiver[fn.Event], - ) - return &HashMailCourier{ - cfg: &hashMailCfg, - backoffHandle: backoffHandle, - recipient: recipient, - mailbox: hashMailBox, - subscribers: subscribers, - }, nil -} + hashMailBox, err := NewHashMailBox(addr) + if err != nil { + return nil, fmt.Errorf("unable to make mailbox: %v", + err) + } -// NewHashMailCourierAddr generates a new hashmail courier address from a given -// URL. This function also performs hashmail protocol specific address -// validation. -func NewHashMailCourierAddr(addr url.URL) (*HashMailCourierAddr, error) { - if addr.Scheme != HashmailCourierType { - return nil, fmt.Errorf("expected hashmail courier protocol: %v", - addr.Scheme) - } + return &HashMailCourier{ + cfg: &hashMailCfg, + backoffHandle: backoffHandler, + recipient: recipient, + mailbox: hashMailBox, + subscribers: subscribers, + }, nil - // We expect the port number to be specified for a hashmail service. - if addr.Port() == "" { - return nil, fmt.Errorf("hashmail proof courier URI address " + - "port unspecified") - } + case UniverseRpcCourierType: + cfg := u.cfg.UniverseRpcCfg + backoffHandler := NewBackoffHandler( + cfg.BackoffCfg, u.cfg.TransferLog, + ) - return &HashMailCourierAddr{ - addr, - }, nil -} + // Connect to the universe RPC server. + dialOpts, err := serverDialOpts() + if err != nil { + return nil, err + } -// UniverseRpcCourierAddr is a universe RPC protocol specific implementation of -// the CourierAddr interface. 
-type UniverseRpcCourierAddr struct { - addr url.URL -} + serverAddr := fmt.Sprintf("%s:%s", addr.Hostname(), addr.Port()) + conn, err := grpc.Dial(serverAddr, dialOpts...) + if err != nil { + return nil, err + } -// Url returns the url.URL representation of the courier address. -func (h *UniverseRpcCourierAddr) Url() *url.URL { - return &h.addr -} + client := unirpc.NewUniverseClient(conn) -// NewCourier generates a new courier service handle. -func (h *UniverseRpcCourierAddr) NewCourier(_ context.Context, - cfg *CourierCfg, recipient Recipient) (Courier, error) { - - // Skip the initial delivery delay for the universe RPC courier. - // This courier skips the initial delay because it uses the backoff - // procedure for each proof within a proof file separately. - // Consequently, if we attempt to perform two consecutive send events - // which share the same proof lineage (matching ancestral proofs), the - // second send event will be delayed by the initial delay. - cfg.BackoffCfg.SkipInitDelay = true - backoffHandle := NewBackoffHandler(cfg.BackoffCfg, cfg.TransferLog) - - // Ensure that the courier address is a universe RPC address. - if h.addr.Scheme != UniverseRpcCourierType { - return nil, fmt.Errorf("unsupported courier protocol: %v", - h.addr.Scheme) + return &UniverseRpcCourier{ + recipient: recipient, + client: client, + backoffHandle: backoffHandler, + transfer: u.cfg.TransferLog, + subscribers: subscribers, + rawConn: conn, + }, nil + + default: + return nil, fmt.Errorf("unknown courier address protocol "+ + "(consider updating tapd): %v", addr.Scheme) } +} - // Connect to the universe RPC server. - dialOpts, err := serverDialOpts() +// A compile-time assertion to ensure that the URLDispatch meets the +// CourierDispatch interface. +var _ CourierDispatch = (*URLDispatch)(nil) + +// ParseCourierAddress attempts to parse the given string as a proof courier +// address, validates that all required fields are present and ensures the +// protocol is one of the supported protocols. +func ParseCourierAddress(addr string) (*url.URL, error) { + urlAddr, err := url.ParseRequestURI(addr) if err != nil { - return nil, err + return nil, fmt.Errorf("invalid proof courier URI address: %w", + err) } - serverAddr := fmt.Sprintf( - "%s:%s", h.addr.Hostname(), h.addr.Port(), - ) - conn, err := grpc.Dial(serverAddr, dialOpts...) - if err != nil { + if err := ValidateCourierAddress(urlAddr); err != nil { return nil, err } - client := unirpc.NewUniverseClient(conn) - - // Instantiate the events subscribers map. - subscribers := make( - map[uint64]*fn.EventReceiver[fn.Event], - ) - - return &UniverseRpcCourier{ - recipient: recipient, - client: client, - backoffHandle: backoffHandle, - transfer: cfg.TransferLog, - subscribers: subscribers, - }, nil + return urlAddr, nil } -// NewUniverseRpcCourierAddr generates a new universe RPC courier address from a -// given URL. This function also performs protocol specific address validation. -func NewUniverseRpcCourierAddr(addr url.URL) (*UniverseRpcCourierAddr, error) { +// ValidateCourierAddress validates that all required fields are present and +// ensures the protocol is one of the supported protocols. +func ValidateCourierAddress(addr *url.URL) error { // We expect the port number to be specified. 
if addr.Port() == "" { - return nil, fmt.Errorf("proof courier URI address port " + - "unspecified") + return fmt.Errorf("proof courier URI address port unspecified") } - return &UniverseRpcCourierAddr{ - addr, - }, nil -} - -// NewCourier instantiates a new courier service handle given a service URL -// address. -func NewCourier(ctx context.Context, addr url.URL, cfg *CourierCfg, - recipient Recipient) (Courier, error) { + switch addr.Scheme { + case HashmailCourierType, UniverseRpcCourierType: + // Valid and known courier address protocol. + return nil - courierAddr, err := ParseCourierAddrUrl(addr) - if err != nil { - return nil, err + default: + return fmt.Errorf("unknown courier address protocol "+ + "(consider updating tapd): %v", addr.Scheme) } - - return courierAddr.NewCourier(ctx, cfg, recipient) -} - -// CourierCfg contains general config parameters applicable to all proof -// couriers. -type CourierCfg struct { - // ReceiverAckTimeout is the maximum time we'll wait for the receiver to - // acknowledge the proof. - ReceiverAckTimeout time.Duration - - // BackoffCfg configures the behaviour of the proof delivery - // functionality. - BackoffCfg *BackoffCfg - - // TransferLog is a log for recording proof delivery and retrieval - // attempts. - TransferLog TransferLog } // ProofMailbox represents an abstract store-and-forward mailbox that can be @@ -291,11 +238,16 @@ type ProofMailbox interface { // CleanUp attempts to tear down the mailbox as specified by the passed // sid. CleanUp(ctx context.Context, sid streamID) error + + // Close closes the underlying connection to the hashmail server. + Close() error } // HashMailBox is an implementation of the ProofMailbox interface backed by the // hashmailrpc.HashMailClient. type HashMailBox struct { + rawConn *grpc.ClientConn + client hashmailrpc.HashMailClient } @@ -341,7 +293,8 @@ func NewHashMailBox(courierAddr *url.URL) (*HashMailBox, client := hashmailrpc.NewHashMailClient(conn) return &HashMailBox{ - client: client, + client: client, + rawConn: conn, }, nil } @@ -465,7 +418,7 @@ func (h *HashMailBox) RecvAck(ctx context.Context, sid streamID) error { return fmt.Errorf("expected ack, got %x", msg.Msg) } -// CleanUp atempts to tear down the mailbox as specified by the passed sid. +// CleanUp attempts to tear down the mailbox as specified by the passed sid. func (h *HashMailBox) CleanUp(ctx context.Context, sid streamID) error { streamAuth := &hashmailrpc.CipherBoxAuth{ Desc: &hashmailrpc.CipherBoxDesc{ @@ -480,6 +433,11 @@ func (h *HashMailBox) CleanUp(ctx context.Context, sid streamID) error { return err } +// Close closes the underlying connection to the hashmail server. +func (h *HashMailBox) Close() error { + return h.rawConn.Close() +} + // A compile-time assertion to ensure that the HashMailBox meets the // ProofMailbox interface. var _ ProofMailbox = (*HashMailBox)(nil) @@ -541,10 +499,10 @@ func (e *BackoffExecError) Error() string { // BackoffCfg configures the behaviour of the proof delivery backoff procedure. type BackoffCfg struct { - // SkipInitDelay is a flag that indicates whether we should skip - // the initial delay before attempting to deliver the proof to the - // receiver. - SkipInitDelay bool + // SkipInitDelay is a flag that indicates whether we should skip the + // initial delay before attempting to deliver the proof to the receiver + // or receiving from the sender. 
+ SkipInitDelay bool `long:"skipinitdelay" description:"Skip the initial delay before attempting to deliver the proof to the receiver or receiving from the sender."` // BackoffResetWait is the amount of time we'll wait before // resetting the backoff counter to its initial state. @@ -853,6 +811,8 @@ func (h *HashMailCourier) DeliverProof(ctx context.Context, log.Infof("Received ACK from receiver! Cleaning up mailboxes...") + defer h.Close() + // Once we receive this ACK, we can clean up our mailbox and also the // receiver's mailbox. if err := h.mailbox.CleanUp(ctx, senderStreamID); err != nil { @@ -928,6 +888,17 @@ func (h *HashMailCourier) publishSubscriberEvent(event fn.Event) { } } +// Close closes the underlying connection to the hashmail server. +func (h *HashMailCourier) Close() error { + if err := h.mailbox.Close(); err != nil { + log.Warnf("unable to close mailbox session, "+ + "recipient=%v: %v", err, spew.Sdump(h.recipient)) + return err + } + + return nil +} + // BackoffWaitEvent is an event that is sent to a subscriber each time we // wait via the Backoff procedure before retrying to deliver a proof to the // receiver. @@ -1020,6 +991,13 @@ func (h *HashMailCourier) SetSubscribers( // proof.Courier interface. var _ Courier = (*HashMailCourier)(nil) +// UniverseRpcCourierCfg is the config for the universe RPC proof courier. +type UniverseRpcCourierCfg struct { + // BackoffCfg configures the behaviour of the proof delivery + // functionality. + BackoffCfg *BackoffCfg +} + // UniverseRpcCourier is a universe RPC proof courier service handle. It // implements the Courier interface. type UniverseRpcCourier struct { @@ -1030,6 +1008,10 @@ type UniverseRpcCourier struct { // the universe RPC server. client unirpc.UniverseClient + // rawConn is the raw connection that the courier will use to interact + // with the remote gRPC service. + rawConn *grpc.ClientConn + // backoffHandle is a handle to the backoff procedure used in proof // delivery. backoffHandle *BackoffHandler @@ -1155,36 +1137,24 @@ func (c *UniverseRpcCourier) DeliverProof(ctx context.Context, func (c *UniverseRpcCourier) ReceiveProof(ctx context.Context, originLocator Locator) (*AnnotatedProof, error) { - // In order to reconstruct the proof file we must collect all the - // transition proofs that make up the main chain of proofs. That is - // accomplished by iterating backwards through the main chain of proofs - // until we reach the genesis point (minting proof). - - // We will update the locator at each iteration. - loc := originLocator - - // revProofs is a slice of transition proofs ordered from latest to - // earliest (the issuance proof comes last in the slice). This ordering - // is a reversal of that found in the proof file. 
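
The universe RPC courier now carries its own backoff configuration instead of sharing a single CourierCfg-level BackoffCfg. A sketch of populating the UniverseRpcCourierCfg defined above (field names match this diff and the tapcfg defaults further below; the duration values are illustrative):

uniCourierCfg := &proof.UniverseRpcCourierCfg{
	BackoffCfg: &proof.BackoffCfg{
		// Skip the initial delay, since this courier applies the
		// backoff procedure to each proof within a proof file.
		SkipInitDelay:    true,
		BackoffResetWait: 10 * time.Minute,
		NumTries:         5,
		InitialBackoff:   30 * time.Second,
		MaxBackoff:       5 * time.Minute,
	},
}
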
- var revProofs []Proof - - for { - assetID := *loc.AssetID - + fetchProof := func(ctx context.Context, loc Locator) (Blob, error) { var groupKeyBytes []byte if loc.GroupKey != nil { groupKeyBytes = loc.GroupKey.SerializeCompressed() } - universeID := unirpc.MarshalUniverseID( - assetID[:], groupKeyBytes, - ) - assetKey := unirpc.MarshalAssetKey( - *loc.OutPoint, &loc.ScriptKey, - ) + if loc.OutPoint == nil { + return nil, fmt.Errorf("proof locator for asset %x "+ + "is missing outpoint", loc.AssetID[:]) + } + universeKey := unirpc.UniverseKey{ - Id: universeID, - LeafKey: assetKey, + Id: unirpc.MarshalUniverseID( + loc.AssetID[:], groupKeyBytes, + ), + LeafKey: unirpc.MarshalAssetKey( + *loc.OutPoint, &loc.ScriptKey, + ), } // Setup proof receive/query routine and start backoff @@ -1215,50 +1185,13 @@ func (c *UniverseRpcCourier) ReceiveProof(ctx context.Context, "attempt has failed: %w", err) } - // Decode transition proof from query response. - var transitionProof Proof - if err := transitionProof.Decode( - bytes.NewReader(proofBlob), - ); err != nil { - return nil, err - } - - revProofs = append(revProofs, transitionProof) - - // Break if we've reached the genesis point (the asset is the - // genesis asset). - proofAsset := transitionProof.Asset - if proofAsset.IsGenesisAsset() { - break - } - - // Update locator with principal input to the current outpoint. - prevID, err := transitionProof.Asset.PrimaryPrevID() - if err != nil { - return nil, err - } - - // Parse script key public key. - scriptKeyPubKey, err := btcec.ParsePubKey(prevID.ScriptKey[:]) - if err != nil { - return nil, fmt.Errorf("failed to parse script key "+ - "public key from Proof.PrevID: %w", err) - } - loc.ScriptKey = *scriptKeyPubKey - - loc.AssetID = &prevID.ID - loc.OutPoint = &prevID.OutPoint + return proofBlob, nil } - // Append proofs to proof file in reverse order to their collected - // order. - proofFile := &File{} - for i := len(revProofs) - 1; i >= 0; i-- { - err := proofFile.AppendProof(revProofs[i]) - if err != nil { - return nil, fmt.Errorf("error appending proof to "+ - "proof file: %w", err) - } + proofFile, err := FetchProofProvenance(ctx, originLocator, fetchProof) + if err != nil { + return nil, fmt.Errorf("error fetching proof provenance: %w", + err) } // Encode the full proof file. @@ -1297,6 +1230,11 @@ func (c *UniverseRpcCourier) publishSubscriberEvent(event fn.Event) { } } +// Close closes the courier's connection to the remote gRPC service. +func (c *UniverseRpcCourier) Close() error { + return c.rawConn.Close() +} + // A compile-time assertion to ensure the UniverseRpcCourier meets the // proof.Courier interface. var _ Courier = (*UniverseRpcCourier)(nil) @@ -1328,3 +1266,83 @@ type TransferLog interface { QueryProofTransferLog(context.Context, Locator, TransferType) ([]time.Time, error) } + +// FetchProofProvenance iterates backwards through the main chain of proofs +// until it reaches the genesis point (the asset is the genesis asset) and then +// returns the full proof file with the full provenance for the asset. +func FetchProofProvenance(ctx context.Context, originLocator Locator, + fetchSingleProof func(context.Context, Locator) (Blob, error)) (*File, + error) { + + // In order to reconstruct the proof file we must collect all the + // transition proofs that make up the main chain of proofs. That is + // accomplished by iterating backwards through the main chain of proofs + // until we reach the genesis point (minting proof). + + // We will update the locator at each iteration. 
+ currentLocator := originLocator + + // reversedProofs is a slice of transition proofs ordered from latest to + // earliest (the issuance proof comes last in the slice). This ordering + // is a reversal of that found in the proof file. + var reversedProofs []Blob + for { + // Setup proof receive/query routine and start backoff + // procedure. + proofBlob, err := fetchSingleProof(ctx, currentLocator) + if err != nil { + return nil, fmt.Errorf("fetching single proof "+ + "failed: %w", err) + } + + // Decode just the asset leaf record from the proof. + var proofAsset asset.Asset + assetRecord := AssetLeafRecord(&proofAsset) + err = SparseDecode(bytes.NewReader(proofBlob), assetRecord) + if err != nil { + return nil, fmt.Errorf("unable to decode proof: %w", + err) + } + + reversedProofs = append(reversedProofs, proofBlob) + + // Break if we've reached the genesis point (the asset is the + // genesis asset). + if proofAsset.IsGenesisAsset() { + break + } + + // Update locator with principal input to the current outpoint. + prevID, err := proofAsset.PrimaryPrevID() + if err != nil { + return nil, err + } + + // Parse script key public key. + scriptKeyPubKey, err := btcec.ParsePubKey(prevID.ScriptKey[:]) + if err != nil { + return nil, fmt.Errorf("failed to parse script key "+ + "public key from Proof.PrevID: %w", err) + } + + currentLocator = Locator{ + AssetID: &prevID.ID, + GroupKey: originLocator.GroupKey, + ScriptKey: *scriptKeyPubKey, + OutPoint: &prevID.OutPoint, + } + } + + // Append proofs to proof file in reverse order to their collected + // order. + proofFile := &File{} + for i := len(reversedProofs) - 1; i >= 0; i-- { + err := proofFile.AppendProofRaw(reversedProofs[i]) + if err != nil { + return nil, fmt.Errorf("error appending proof to "+ + "proof file: %w", err) + } + } + + return proofFile, nil +} diff --git a/proof/file.go b/proof/file.go index 3e53e9316..e3c0998d3 100644 --- a/proof/file.go +++ b/proof/file.go @@ -393,6 +393,26 @@ func (f *File) AppendProof(proof Proof) error { return nil } +// AppendProofRaw appends a raw proof to the file and calculates its chained +// hash. +func (f *File) AppendProofRaw(proof []byte) error { + if f.IsUnknownVersion() { + return ErrUnknownVersion + } + + var prevHash [sha256.Size]byte + if !f.IsEmpty() { + prevHash = f.proofs[len(f.proofs)-1].hash + } + + f.proofs = append(f.proofs, &hashedProof{ + proofBytes: proof, + hash: hashProof(proof, prevHash), + }) + + return nil +} + // ReplaceLastProof attempts to replace the last proof in the file with another // one, updating its chained hash in the process. func (f *File) ReplaceLastProof(proof Proof) error { diff --git a/proof/mint.go b/proof/mint.go index 8c73372e2..737ff5014 100644 --- a/proof/mint.go +++ b/proof/mint.go @@ -11,9 +11,86 @@ import ( "github.com/lightninglabs/taproot-assets/commitment" ) -// Blob represents a serialized proof file, including the checksum. +// Blob either represents a serialized proof file, including the checksum or a +// single serialized issuance/transition proof. Which one it is can be found out +// from the leading magic bytes (or the helper methods that inspect those). type Blob []byte +// IsFile returns true if the blob is a serialized proof file. +func (b Blob) IsFile() bool { + return IsProofFile(b) +} + +// IsSingleProof returns true if the blob is a serialized single proof. +func (b Blob) IsSingleProof() bool { + return IsSingleProof(b) +} + +// AsFile returns the blob as a parsed file. 
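
FetchProofProvenance above is deliberately transport-agnostic: the caller supplies a closure that fetches a single proof blob per locator. A hedged sketch of a caller that reuses it with an arbitrary fetch function (assembleFile and fetchBlob are hypothetical, and the sketch assumes the File.Encode writer API used elsewhere in this package):

func assembleFile(ctx context.Context, loc proof.Locator,
	fetchBlob func(context.Context, proof.Locator) (proof.Blob, error)) (
	proof.Blob, error) {

	// Walk the provenance backwards from loc to the genesis proof and
	// assemble the proofs into a file in forward order.
	file, err := proof.FetchProofProvenance(ctx, loc, fetchBlob)
	if err != nil {
		return nil, err
	}

	// Encode the full proof file for storage or transfer.
	var buf bytes.Buffer
	if err := file.Encode(&buf); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
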
If the blob is a single proof, it +// will be parsed as a file with a single proof. +func (b Blob) AsFile() (*File, error) { + switch { + // We have a full file, we can just parse it and return it. + case b.IsFile(): + file := NewEmptyFile(V0) + if err := file.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding proof file: %w", + err) + } + + return file, nil + + // We have a single proof, so let's parse it and return it directly, + // assuming it is the most recent proof the caller is interested in. + case b.IsSingleProof(): + p := Proof{} + if err := p.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding single proof: "+ + "%w", err) + } + + file, err := NewFile(V0, p) + if err != nil { + return nil, err + } + + return file, nil + + default: + return nil, fmt.Errorf("unknown proof blob type") + } +} + +// AsSingleProof returns the blob as a parsed single proof. If the blob is a +// full proof file, the parsed last proof of that file will be returned. +func (b Blob) AsSingleProof() (*Proof, error) { + switch { + // We have a full file, we can just parse it and return it. + case b.IsFile(): + file := NewEmptyFile(V0) + if err := file.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding proof file: %w", + err) + } + + return file.LastProof() + + // We have a single proof, so let's parse it and return it directly, + // assuming it is the most recent proof the caller is interested in. + case b.IsSingleProof(): + p := Proof{} + if err := p.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding single proof: "+ + "%w", err) + } + + return &p, nil + + default: + return nil, fmt.Errorf("unknown proof blob type") + } +} + // AssetBlobs is a data structure used to pass around the proof files for a // set of assets which may have been created in the same batched transaction. // This maps the script key of the asset to the serialized proof file blob. diff --git a/proof/mock.go b/proof/mock.go index 55c5d1ea6..c4810e8be 100644 --- a/proof/mock.go +++ b/proof/mock.go @@ -5,6 +5,8 @@ import ( "context" "encoding/hex" "io" + "net/url" + "sync" "testing" "time" @@ -13,6 +15,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/commitment" + "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/internal/test" "github.com/stretchr/testify/require" ) @@ -74,6 +77,94 @@ func MockGroupAnchorVerifier(gen *asset.Genesis, return nil } +// MockProofCourierDispatcher is a mock proof courier dispatcher which returns +// the same courier for all requests. +type MockProofCourierDispatcher struct { + Courier Courier +} + +// NewCourier instantiates a new courier service handle given a service +// URL address. +func (m *MockProofCourierDispatcher) NewCourier(*url.URL, Recipient) (Courier, + error) { + + return m.Courier, nil +} + +// MockProofCourier is a mock proof courier which stores the last proof it +// received. +type MockProofCourier struct { + sync.Mutex + + currentProofs map[asset.SerializedKey]*AnnotatedProof + + subscribers map[uint64]*fn.EventReceiver[fn.Event] +} + +// NewMockProofCourier returns a new mock proof courier. +func NewMockProofCourier() *MockProofCourier { + return &MockProofCourier{ + currentProofs: make(map[asset.SerializedKey]*AnnotatedProof), + } +} + +// Start starts the proof courier service. 
+func (m *MockProofCourier) Start(chan error) error { + return nil +} + +// Stop stops the proof courier service. +func (m *MockProofCourier) Stop() error { + return nil +} + +// DeliverProof attempts to delivery a proof to the receiver, using the +// information in the Addr type. +func (m *MockProofCourier) DeliverProof(_ context.Context, + proof *AnnotatedProof) error { + + m.Lock() + defer m.Unlock() + + m.currentProofs[asset.ToSerialized(&proof.ScriptKey)] = proof + + return nil +} + +// ReceiveProof attempts to obtain a proof as identified by the passed +// locator from the source encapsulated within the specified address. +func (m *MockProofCourier) ReceiveProof(_ context.Context, + loc Locator) (*AnnotatedProof, error) { + + m.Lock() + defer m.Unlock() + + proof, ok := m.currentProofs[asset.ToSerialized(&loc.ScriptKey)] + if !ok { + return nil, ErrProofNotFound + } + + return proof, nil +} + +// SetSubscribers sets the set of subscribers that will be notified +// of proof courier related events. +func (m *MockProofCourier) SetSubscribers( + subscribers map[uint64]*fn.EventReceiver[fn.Event]) { + + m.Lock() + defer m.Unlock() + + m.subscribers = subscribers +} + +// Close stops the courier instance. +func (m *MockProofCourier) Close() error { + return nil +} + +var _ Courier = (*MockProofCourier)(nil) + type ValidTestCase struct { Proof *TestProof `json:"proof"` Expected string `json:"expected"` diff --git a/proof/verifier.go b/proof/verifier.go index 098a1ac17..18c3e9b81 100644 --- a/proof/verifier.go +++ b/proof/verifier.go @@ -327,11 +327,11 @@ func (p *Proof) verifyGenesisReveal() error { // verifyGenesisGroupKey verifies that the group key attached to the asset in // this proof has already been verified. -func (p *Proof) verfyGenesisGroupKey(groupVerifier GroupVerifier) error { +func (p *Proof) verifyGenesisGroupKey(groupVerifier GroupVerifier) error { groupKey := p.Asset.GroupKey.GroupPubKey err := groupVerifier(&groupKey) if err != nil { - return ErrGroupKeyUnknown + return fmt.Errorf("%w: %v", ErrGroupKeyUnknown, err) } return nil @@ -487,7 +487,7 @@ func (p *Proof) Verify(ctx context.Context, prev *AssetSnapshot, case isGenesisAsset && hasGroupKey && !hasGroupKeyReveal: // A reissuance must be for an asset group that has already // been imported and verified. - if err := p.verfyGenesisGroupKey(groupVerifier); err != nil { + if err := p.verifyGenesisGroupKey(groupVerifier); err != nil { return nil, err } @@ -500,7 +500,7 @@ func (p *Proof) Verify(ctx context.Context, prev *AssetSnapshot, // 7. Verify group key for asset transfers. Any asset with a group key // must carry a group key that has already been imported and verified. if !isGenesisAsset && hasGroupKey { - if err := p.verfyGenesisGroupKey(groupVerifier); err != nil { + if err := p.verifyGenesisGroupKey(groupVerifier); err != nil { return nil, err } } diff --git a/rpcserver.go b/rpcserver.go index 59f273c2f..ac2809820 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -1056,19 +1056,14 @@ func (r *rpcServer) NewAddr(ctx context.Context, // the default specified in the config. courierAddr := r.cfg.DefaultProofCourierAddr if req.ProofCourierAddr != "" { - addr, err := proof.ParseCourierAddrString( + var err error + courierAddr, err = proof.ParseCourierAddress( req.ProofCourierAddr, ) if err != nil { return nil, fmt.Errorf("invalid proof courier "+ "address: %w", err) } - - // At this point, we do not intend on creating a proof courier - // service instance. We are only interested in parsing and - // validating the address. 
We therefore convert the address into - // an url.URL type for storage in the address book. - courierAddr = addr.Url() } // Check that the proof courier address is set. This should never @@ -1077,7 +1072,6 @@ func (r *rpcServer) NewAddr(ctx context.Context, if courierAddr == nil { return nil, fmt.Errorf("no proof courier address provided") } - proofCourierAddr := *courierAddr if len(req.AssetId) != 32 { return nil, fmt.Errorf("invalid asset id length") @@ -1114,8 +1108,7 @@ func (r *rpcServer) NewAddr(ctx context.Context, // Now that we have all the params, we'll try to add a new // address to the addr book. addr, err = r.cfg.AddrBook.NewAddress( - ctx, assetID, req.Amt, tapscriptSibling, - proofCourierAddr, + ctx, assetID, req.Amt, tapscriptSibling, *courierAddr, address.WithAssetVersion(assetVersion), ) if err != nil { @@ -1156,7 +1149,7 @@ func (r *rpcServer) NewAddr(ctx context.Context, // address to the addr book. addr, err = r.cfg.AddrBook.NewAddressWithKeys( ctx, assetID, req.Amt, *scriptKey, internalKey, - tapscriptSibling, proofCourierAddr, + tapscriptSibling, *courierAddr, address.WithAssetVersion(assetVersion), ) if err != nil { @@ -3997,7 +3990,17 @@ func (r *rpcServer) DeleteFederationServer(ctx context.Context, serversToDel := fn.Map(req.Servers, unmarshalUniverseServer) - err := r.cfg.FederationDB.RemoveServers(ctx, serversToDel...) + // Remove the servers from the proofs sync log. This is necessary before + // we can remove the servers from the database because of a foreign + // key constraint. + err := r.cfg.FederationDB.DeleteProofsSyncLogEntries( + ctx, serversToDel..., + ) + if err != nil { + return nil, err + } + + err = r.cfg.FederationDB.RemoveServers(ctx, serversToDel...) if err != nil { return nil, err } diff --git a/server.go b/server.go index fd69e1558..73d03af5b 100644 --- a/server.go +++ b/server.go @@ -7,6 +7,7 @@ import ( "strings" "sync" "sync/atomic" + "time" proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/lightninglabs/lndclient" @@ -21,6 +22,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" "gopkg.in/macaroon-bakery.v2/bakery" ) @@ -269,6 +271,12 @@ func (s *Server) RunUntilShutdown(mainErrChan <-chan error) error { serverOpts = append(serverOpts, rpcServerOpts...) serverOpts = append(serverOpts, ServerMaxMsgReceiveSize) + keepAliveParams := keepalive.ServerParameters{ + MaxConnectionIdle: time.Minute * 2, + } + + serverOpts = append(serverOpts, grpc.KeepaliveParams(keepAliveParams)) + grpcServer := grpc.NewServer(serverOpts...) defer grpcServer.Stop() @@ -313,6 +321,16 @@ func (s *Server) RunUntilShutdown(mainErrChan <-chan error) error { // configuration. s.cfg.Prometheus.RPCServer = grpcServer + // Provide Prometheus collectors with access to Universe stats. + s.cfg.Prometheus.UniverseStats = s.cfg.UniverseStats + + // Provide Prometheus collectors with access to the asset store. + s.cfg.Prometheus.AssetStore = s.cfg.AssetStore + + // Provide Prometheus collectors with access to the asset + // minter. 
+ s.cfg.Prometheus.AssetMinter = s.cfg.AssetMinter + promExporter, err := monitoring.NewPrometheusExporter( &s.cfg.Prometheus, ) @@ -321,13 +339,13 @@ func (s *Server) RunUntilShutdown(mainErrChan <-chan error) error { err) } - srvrLog.Infof("Prometheus exporter server listening on %v", - s.cfg.Prometheus.ListenAddr) - if err := promExporter.Start(); err != nil { return mkErr("Unable to start prometheus exporter: %v", err) } + + srvrLog.Infof("Prometheus exporter server listening on %v", + s.cfg.Prometheus.ListenAddr) } srvrLog.Infof("Taproot Asset Daemon fully active!") diff --git a/tapcfg/config.go b/tapcfg/config.go index d5f6c5f5c..ebccde50e 100644 --- a/tapcfg/config.go +++ b/tapcfg/config.go @@ -303,8 +303,9 @@ type Config struct { ReOrgSafeDepth int32 `long:"reorgsafedepth" description:"The number of confirmations we'll wait for before considering a transaction safely buried in the chain."` // The following options are used to configure the proof courier. - DefaultProofCourierAddr string `long:"proofcourieraddr" description:"Default proof courier service address."` - HashMailCourier *proof.HashMailCourierCfg `group:"proofcourier" namespace:"hashmailcourier"` + DefaultProofCourierAddr string `long:"proofcourieraddr" description:"Default proof courier service address."` + HashMailCourier *proof.HashMailCourierCfg `group:"hashmailcourier" namespace:"hashmailcourier"` + UniverseRpcCourier *proof.UniverseRpcCourierCfg `group:"universerpccourier" namespace:"universerpccourier"` CustodianProofRetrievalDelay time.Duration `long:"custodianproofretrievaldelay" description:"The number of seconds the custodian waits after identifying an asset transfer on-chain and before retrieving the corresponding proof."` @@ -391,6 +392,15 @@ func DefaultConfig() Config { MaxBackoff: defaultProofTransferMaxBackoff, }, }, + UniverseRpcCourier: &proof.UniverseRpcCourierCfg{ + BackoffCfg: &proof.BackoffCfg{ + SkipInitDelay: true, + BackoffResetWait: defaultProofTransferBackoffResetWait, + NumTries: defaultProofTransferNumTries, + InitialBackoff: defaultProofTransferInitialBackoff, + MaxBackoff: defaultProofTransferMaxBackoff, + }, + }, CustodianProofRetrievalDelay: defaultProofRetrievalDelay, Universe: &UniverseConfig{ SyncInterval: defaultUniverseSyncInterval, diff --git a/tapcfg/server.go b/tapcfg/server.go index 37786b96b..862de4653 100644 --- a/tapcfg/server.go +++ b/tapcfg/server.go @@ -212,7 +212,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, fallbackHashmailCourierAddr := fmt.Sprintf( "%s://%s", proof.HashmailCourierType, fallbackHashMailAddr, ) - proofCourierAddr, err := proof.ParseCourierAddrString( + proofCourierAddr, err := proof.ParseCourierAddress( fallbackHashmailCourierAddr, ) if err != nil { @@ -222,7 +222,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, // If default proof courier address is set, use it as the default. if cfg.DefaultProofCourierAddr != "" { - proofCourierAddr, err = proof.ParseCourierAddrString( + proofCourierAddr, err = proof.ParseCourierAddress( cfg.DefaultProofCourierAddr, ) if err != nil { @@ -231,18 +231,6 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, } } - // TODO(ffranr): This logic is leftover for integration tests which - // do not yet enable a proof courier. Remove once all integration tests - // support a proof courier. 
- var proofCourierCfg *proof.CourierCfg - if cfg.HashMailCourier != nil { - proofCourierCfg = &proof.CourierCfg{ - ReceiverAckTimeout: cfg.HashMailCourier.ReceiverAckTimeout, - BackoffCfg: cfg.HashMailCourier.BackoffCfg, - TransferLog: assetStore, - } - } - reOrgWatcher := tapgarden.NewReOrgWatcher(&tapgarden.ReOrgWatcherConfig{ ChainBridge: chainBridge, GroupVerifier: tapgarden.GenGroupVerifier( @@ -330,6 +318,17 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, ChainParams: &tapChainParams, }) + // Addresses can have different proof couriers configured, but both + // types of couriers that currently exist will receive this config upon + // initialization. + proofCourierDispatcher := proof.NewCourierDispatch(&proof.CourierCfg{ + HashMailCfg: cfg.HashMailCourier, + UniverseRpcCfg: cfg.UniverseRpcCourier, + TransferLog: assetStore, + }) + + multiNotifier := proof.NewMultiArchiveNotifier(assetStore, multiverse) + return &tap.Config{ DebugLevel: cfg.DebugLevel, RuntimeID: runtimeID, @@ -362,19 +361,18 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, GroupVerifier: tapgarden.GenGroupVerifier( context.Background(), assetMintingStore, ), - AddrBook: addrBook, - ProofArchive: proofArchive, - ProofNotifier: assetStore, - ErrChan: mainErrChan, - ProofCourierCfg: proofCourierCfg, - ProofRetrievalDelay: cfg.CustodianProofRetrievalDelay, - ProofWatcher: reOrgWatcher, + AddrBook: addrBook, + ProofArchive: proofArchive, + ProofNotifier: multiNotifier, + ErrChan: mainErrChan, + ProofCourierDispatcher: proofCourierDispatcher, + ProofRetrievalDelay: cfg.CustodianProofRetrievalDelay, ProofWatcher: reOrgWatcher, }, ), ChainBridge: chainBridge, AddrBook: addrBook, AddrBookDisableSyncer: cfg.AddrBook.DisableSyncer, - DefaultProofCourierAddr: proofCourierAddr.Url(), + DefaultProofCourierAddr: proofCourierAddr, ProofArchive: proofArchive, AssetWallet: assetWallet, CoinSelect: coinSelect, @@ -387,13 +385,13 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, GroupVerifier: tapgarden.GenGroupVerifier( context.Background(), assetMintingStore, ), - Wallet: walletAnchor, - KeyRing: keyRing, - AssetWallet: assetWallet, - AssetProofs: proofFileStore, - ProofCourierCfg: proofCourierCfg, - ProofWatcher: reOrgWatcher, - ErrChan: mainErrChan, + Wallet: walletAnchor, + KeyRing: keyRing, + AssetWallet: assetWallet, + AssetProofs: proofFileStore, + ProofCourierDispatcher: proofCourierDispatcher, + ProofWatcher: reOrgWatcher, + ErrChan: mainErrChan, }, ), UniverseArchive: baseUni, diff --git a/tapdb/assets_store.go b/tapdb/assets_store.go index 2699445eb..606622601 100644 --- a/tapdb/assets_store.go +++ b/tapdb/assets_store.go @@ -175,6 +175,10 @@ type ActiveAssetsStore interface { FetchAssetProof(ctx context.Context, scriptKey []byte) (AssetProofI, error) + // HasAssetProof returns true if we have proof for a given asset + // identified by its script key. + HasAssetProof(ctx context.Context, scriptKey []byte) (bool, error) + // FetchAssetProofsByAssetID fetches all asset proofs for a given asset // ID. FetchAssetProofsByAssetID(ctx context.Context, @@ -1238,6 +1242,38 @@ func (a *AssetStore) FetchProof(ctx context.Context, return diskProof, nil } +// HasProof returns true if the proof for the given locator exists. This is +// intended to be a performance optimized lookup compared to fetching a proof +// and checking for ErrProofNotFound. 
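+// Only the locator's script key is consulted for this lookup; the asset ID
+// and outpoint fields of the locator are ignored.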
+func (a *AssetStore) HasProof(ctx context.Context, locator proof.Locator) (bool, + error) { + + // We don't need anything else but the script key since we have an + // on-disk index for all proofs we store. + var ( + scriptKey = locator.ScriptKey + readOpts = NewAssetStoreReadTx() + haveProof bool + ) + dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error { + proofAvailable, err := q.HasAssetProof( + ctx, scriptKey.SerializeCompressed(), + ) + if err != nil { + return fmt.Errorf("unable to find out if we have "+ + "asset proof: %w", err) + } + + haveProof = proofAvailable + return nil + }) + if dbErr != nil { + return false, dbErr + } + + return haveProof, nil +} + // FetchProofs fetches all proofs for assets uniquely identified by the passed // asset ID. // diff --git a/tapdb/assets_store_test.go b/tapdb/assets_store_test.go index 85a914033..82c607300 100644 --- a/tapdb/assets_store_test.go +++ b/tapdb/assets_store_test.go @@ -246,105 +246,17 @@ func assertAssetEqual(t *testing.T, a, b *asset.Asset) { func TestImportAssetProof(t *testing.T) { t.Parallel() - // First, we'll create a new instance of the database. - _, assetStore, db := newAssetStore(t) - - // Next, we'll make a new random asset that also has a few inputs with - // dummy witness information. - testAsset := randAsset(t) - - assetRoot, err := commitment.NewAssetCommitment(testAsset) - require.NoError(t, err) - - taprootAssetRoot, err := commitment.NewTapCommitment(assetRoot) - require.NoError(t, err) - - // With our asset created, we can now create the AnnotatedProof we use - // to import assets into the database. - var blockHash chainhash.Hash - _, err = rand.Read(blockHash[:]) - require.NoError(t, err) + var ( + ctxb = context.Background() - anchorTx := wire.NewMsgTx(2) - anchorTx.AddTxIn(&wire.TxIn{}) - anchorTx.AddTxOut(&wire.TxOut{ - PkScript: bytes.Repeat([]byte{0x01}, 34), - Value: 10, - }) + dbHandle = NewDbHandle(t) + assetStore = dbHandle.AssetStore + ) + // Add a random asset and corresponding proof into the database. + testAsset, testProof := dbHandle.AddRandomAssetProof(t) assetID := testAsset.ID() - anchorPoint := wire.OutPoint{ - Hash: anchorTx.TxHash(), - Index: 0, - } - initialBlob := bytes.Repeat([]byte{0x0}, 100) - updatedBlob := bytes.Repeat([]byte{0x77}, 100) - testProof := &proof.AnnotatedProof{ - Locator: proof.Locator{ - AssetID: &assetID, - ScriptKey: *testAsset.ScriptKey.PubKey, - }, - Blob: initialBlob, - AssetSnapshot: &proof.AssetSnapshot{ - Asset: testAsset, - OutPoint: anchorPoint, - AnchorBlockHash: blockHash, - AnchorBlockHeight: test.RandInt[uint32](), - AnchorTxIndex: test.RandInt[uint32](), - AnchorTx: anchorTx, - OutputIndex: 0, - InternalKey: test.RandPubKey(t), - ScriptRoot: taprootAssetRoot, - }, - } - if testAsset.GroupKey != nil { - testProof.GroupKey = &testAsset.GroupKey.GroupPubKey - } - - // We'll now insert the internal key information as well as the script - // key ahead of time to reflect the address creation that happens - // elsewhere. 
- ctxb := context.Background() - _, err = db.UpsertInternalKey(ctxb, InternalKey{ - RawKey: testProof.InternalKey.SerializeCompressed(), - KeyFamily: test.RandInt[int32](), - KeyIndex: test.RandInt[int32](), - }) - require.NoError(t, err) - rawScriptKeyID, err := db.UpsertInternalKey(ctxb, InternalKey{ - RawKey: testAsset.ScriptKey.RawKey.PubKey.SerializeCompressed(), - KeyFamily: int32(testAsset.ScriptKey.RawKey.Family), - KeyIndex: int32(testAsset.ScriptKey.RawKey.Index), - }) - require.NoError(t, err) - _, err = db.UpsertScriptKey(ctxb, NewScriptKey{ - InternalKeyID: rawScriptKeyID, - TweakedScriptKey: testAsset.ScriptKey.PubKey.SerializeCompressed(), - Tweak: nil, - }) - require.NoError(t, err) - - // We'll add the chain transaction of the proof now to simulate a - // batched transfer on a higher layer. - var anchorTxBuf bytes.Buffer - err = testProof.AnchorTx.Serialize(&anchorTxBuf) - require.NoError(t, err) - anchorTXID := testProof.AnchorTx.TxHash() - _, err = db.UpsertChainTx(ctxb, ChainTxParams{ - Txid: anchorTXID[:], - RawTx: anchorTxBuf.Bytes(), - BlockHeight: sqlInt32(testProof.AnchorBlockHeight), - BlockHash: testProof.AnchorBlockHash[:], - TxIndex: sqlInt32(testProof.AnchorTxIndex), - }) - require.NoError(t, err, "unable to insert chain tx: %w", err) - - // With all our test data constructed, we'll now attempt to import the - // asset into the database. - require.NoError(t, assetStore.ImportProofs( - ctxb, proof.MockHeaderVerifier, proof.MockGroupVerifier, false, - testProof, - )) + initialBlob := testProof.Blob // We should now be able to retrieve the set of all assets inserted on // disk. @@ -371,7 +283,7 @@ func TestImportAssetProof(t *testing.T) { ScriptKey: *testAsset.ScriptKey.PubKey, }) require.NoError(t, err) - require.Equal(t, initialBlob, []byte(currentBlob)) + require.Equal(t, initialBlob, currentBlob) // We should also be able to fetch the created asset above based on // either the asset ID, or key group via the main coin selection @@ -391,6 +303,8 @@ func TestImportAssetProof(t *testing.T) { // We'll now attempt to overwrite the proof with one that has different // block information (simulating a re-org). + updatedBlob := bytes.Repeat([]byte{0x77}, 100) + testProof.AnchorBlockHash = chainhash.Hash{12, 34, 56} testProof.AnchorBlockHeight = 1234 testProof.AnchorTxIndex = 5678 diff --git a/tapdb/migrations.go b/tapdb/migrations.go index 117feac90..4c288380f 100644 --- a/tapdb/migrations.go +++ b/tapdb/migrations.go @@ -2,17 +2,52 @@ package tapdb import ( "bytes" + "errors" "io" "io/fs" "net/http" "strings" + "github.com/btcsuite/btclog" "github.com/golang-migrate/migrate/v4" "github.com/golang-migrate/migrate/v4/database" "github.com/golang-migrate/migrate/v4/source/httpfs" ) -// applyMigrations executes all database migration files found in the given file +// migrationLogger is a logger that wraps the passed btclog.Logger so it can be +// used to log migrations. +type migrationLogger struct { + log btclog.Logger +} + +// Printf is like fmt.Printf. We map this to the target logger based on the +// current log level. +func (m *migrationLogger) Printf(format string, v ...interface{}) { + // Trim trailing newlines from the format. + format = strings.TrimRight(format, "\n") + + switch m.log.Level() { + case btclog.LevelTrace: + m.log.Tracef(format, v...) + case btclog.LevelDebug: + m.log.Debugf(format, v...) + case btclog.LevelInfo: + m.log.Infof(format, v...) + case btclog.LevelWarn: + m.log.Warnf(format, v...) + case btclog.LevelError: + m.log.Errorf(format, v...) 
+ case btclog.LevelCritical: + m.log.Criticalf(format, v...) + } +} + +// Verbose should return true when verbose logging output is wanted +func (m *migrationLogger) Verbose() bool { + return m.log.Level() <= btclog.LevelDebug +} + +// applyMigrations executes database migration files found in the given file // system under the given path, using the passed database driver and database // name. func applyMigrations(fs fs.FS, driver database.Driver, path, @@ -36,8 +71,16 @@ func applyMigrations(fs fs.FS, driver database.Driver, path, if err != nil { return err } + + migrationVersion, _, _ := sqlMigrate.Version() + + log.Infof("Applying migrations from version=%v", migrationVersion) + + // Apply our local logger to the migration instance. + sqlMigrate.Log = &migrationLogger{log} + err = sqlMigrate.Up() - if err != nil && err != migrate.ErrNoChange { + if err != nil && !errors.Is(err, migrate.ErrNoChange) { return err } diff --git a/tapdb/multiverse.go b/tapdb/multiverse.go index 12bbb81b8..edc62cb07 100644 --- a/tapdb/multiverse.go +++ b/tapdb/multiverse.go @@ -1,6 +1,7 @@ package tapdb import ( + "bytes" "context" "crypto/sha256" "database/sql" @@ -476,15 +477,23 @@ type MultiverseStore struct { proofCache *proofCache leafKeysCache *universeLeafCache + + // transferProofDistributor is an event distributor that will be used to + // notify subscribers about new proof leaves that are added to the + // multiverse. This is used to notify the custodian about new incoming + // proofs. And since the custodian is only interested in transfer + // proofs, we only signal on transfer proofs. + transferProofDistributor *fn.EventDistributor[proof.Blob] } // NewMultiverseStore creates a new multiverse DB store handle. func NewMultiverseStore(db BatchedMultiverse) *MultiverseStore { return &MultiverseStore{ - db: db, - rootNodeCache: newRootNodeCache(), - proofCache: newProofCache(), - leafKeysCache: newUniverseLeafCache(), + db: db, + rootNodeCache: newRootNodeCache(), + proofCache: newProofCache(), + leafKeysCache: newUniverseLeafCache(), + transferProofDistributor: fn.NewEventDistributor[proof.Blob](), } } @@ -850,6 +859,78 @@ func (b *MultiverseStore) FetchProofLeaf(ctx context.Context, return proofs, nil } +// FetchProof fetches a proof for an asset uniquely identified by the passed +// Locator. The returned blob contains the encoded full proof file, representing +// the complete provenance of the asset. +// +// If a proof cannot be found, then ErrProofNotFound is returned. +// +// NOTE: This is part of the proof.NotifyArchiver interface. +func (b *MultiverseStore) FetchProof(ctx context.Context, + originLocator proof.Locator) (proof.Blob, error) { + + // The universe only delivers a single proof at a time, so we need a + // callback that we can feed into proof.FetchProofProvenance to assemble + // the full proof file. + fetchProof := func(ctx context.Context, loc proof.Locator) (proof.Blob, + error) { + + uniID := universe.Identifier{ + AssetID: *loc.AssetID, + GroupKey: loc.GroupKey, + ProofType: universe.ProofTypeTransfer, + } + scriptKey := asset.NewScriptKey(&loc.ScriptKey) + leafKey := universe.LeafKey{ + ScriptKey: &scriptKey, + } + if loc.OutPoint != nil { + leafKey.OutPoint = *loc.OutPoint + } + + proofs, err := b.FetchProofLeaf(ctx, uniID, leafKey) + if errors.Is(err, universe.ErrNoUniverseProofFound) { + // If we didn't find a proof, maybe we arrived at the + // issuance proof, in which case we need to adjust the + // proof type. 
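+			// Issuance and transfer proofs are keyed into
+			// separate universe trees, so we retry the same leaf
+			// key under the issuance proof type.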
+ uniID.ProofType = universe.ProofTypeIssuance + proofs, err = b.FetchProofLeaf(ctx, uniID, leafKey) + + // If we still didn't find a proof, then we'll return + // the proof not found error, but the one from the proof + // package, not the universe package, as the Godoc for + // this method in the proof.NotifyArchiver states. + if errors.Is(err, universe.ErrNoUniverseProofFound) { + return nil, proof.ErrProofNotFound + } + } + if err != nil { + return nil, fmt.Errorf("error fetching proof from "+ + "archive: %w", err) + } + + if len(proofs) > 1 { + return nil, fmt.Errorf("expected only one proof, "+ + "got %d", len(proofs)) + } + + return proofs[0].Leaf.RawProof, nil + } + + file, err := proof.FetchProofProvenance(ctx, originLocator, fetchProof) + if err != nil { + return nil, fmt.Errorf("error fetching proof from archive: %w", + err) + } + + var buf bytes.Buffer + if err := file.Encode(&buf); err != nil { + return nil, fmt.Errorf("error encoding proof file: %w", err) + } + + return buf.Bytes(), nil +} + // UpsertProofLeaf upserts a proof leaf within the multiverse tree and the // universe tree that corresponds to the given key. func (b *MultiverseStore) UpsertProofLeaf(ctx context.Context, @@ -944,6 +1025,14 @@ func (b *MultiverseStore) UpsertProofLeaf(ctx context.Context, b.proofCache.delProofsForAsset(id) b.leafKeysCache.wipeCache(idStr) + // Notify subscribers about the new proof leaf, now that we're sure we + // have written it to the database. But we only care about transfer + // proofs, as the events are received by the custodian to finalize + // inbound transfers. + if id.ProofType == universe.ProofTypeTransfer { + b.transferProofDistributor.NotifySubscribers(leaf.RawProof) + } + return issuanceProof, nil } @@ -1024,6 +1113,18 @@ func (b *MultiverseStore) UpsertProofLeafBatch(ctx context.Context, b.rootNodeCache.wipeCache() + // Notify subscribers about the new proof leaves, now that we're sure we + // have written them to the database. But we only care about transfer + // proofs, as the events are received by the custodian to finalize + // inbound transfers. + for idx := range items { + if items[idx].ID.ProofType == universe.ProofTypeTransfer { + b.transferProofDistributor.NotifySubscribers( + items[idx].Leaf.RawProof, + ) + } + } + // Invalidate the root node cache for all the assets we just inserted. idsToDelete := fn.NewSet(fn.Map(items, func(item *universe.Item) treeID { return treeID(item.ID.String()) @@ -1074,3 +1175,68 @@ func (b *MultiverseStore) DeleteUniverse(ctx context.Context, return id.String(), dbErr } + +// RegisterSubscriber adds a new subscriber for receiving events. The +// deliverExisting boolean indicates whether already existing items should be +// sent to the NewItemCreated channel when the subscription is started. An +// optional deliverFrom can be specified to indicate from which timestamp/index/ +// marker onward existing items should be delivered on startup. If deliverFrom +// is nil/zero/empty then all existing items will be delivered. +func (b *MultiverseStore) RegisterSubscriber( + receiver *fn.EventReceiver[proof.Blob], deliverExisting bool, + deliverFrom []*proof.Locator) error { + + b.transferProofDistributor.RegisterSubscriber(receiver) + + // No delivery of existing items requested, we're done here. 
+ if !deliverExisting { + return nil + } + + ctx := context.Background() + for _, loc := range deliverFrom { + if loc.AssetID == nil { + return fmt.Errorf("missing asset ID") + } + + id := universe.Identifier{ + AssetID: *loc.AssetID, + GroupKey: loc.GroupKey, + ProofType: universe.ProofTypeTransfer, + } + scriptKey := asset.NewScriptKey(&loc.ScriptKey) + key := universe.LeafKey{ + ScriptKey: &scriptKey, + } + + if loc.OutPoint != nil { + key.OutPoint = *loc.OutPoint + } + + leaves, err := b.FetchProofLeaf(ctx, id, key) + if err != nil { + return err + } + + // Deliver the found leaves to the new item queue of the + // subscriber. + for idx := range leaves { + rawProof := leaves[idx].Leaf.RawProof + receiver.NewItemCreated.ChanIn() <- rawProof + } + } + + return nil +} + +// RemoveSubscriber removes the given subscriber and also stops it from +// processing events. +func (b *MultiverseStore) RemoveSubscriber( + subscriber *fn.EventReceiver[proof.Blob]) error { + + return b.transferProofDistributor.RemoveSubscriber(subscriber) +} + +// A compile-time interface to ensure MultiverseStore meets the +// proof.NotifyArchiver interface. +var _ proof.NotifyArchiver = (*MultiverseStore)(nil) diff --git a/tapdb/sqlc/assets.sql.go b/tapdb/sqlc/assets.sql.go index 5a30c91ea..a89066174 100644 --- a/tapdb/sqlc/assets.sql.go +++ b/tapdb/sqlc/assets.sql.go @@ -1598,6 +1598,27 @@ func (q *Queries) GenesisPoints(ctx context.Context) ([]GenesisPoint, error) { return items, nil } +const hasAssetProof = `-- name: HasAssetProof :one +WITH asset_info AS ( + SELECT assets.asset_id + FROM assets + JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id + WHERE script_keys.tweaked_script_key = $1 +) +SELECT COUNT(asset_info.asset_id) > 0 as has_proof +FROM asset_proofs +JOIN asset_info + ON asset_info.asset_id = asset_proofs.asset_id +` + +func (q *Queries) HasAssetProof(ctx context.Context, tweakedScriptKey []byte) (bool, error) { + row := q.db.QueryRowContext(ctx, hasAssetProof, tweakedScriptKey) + var has_proof bool + err := row.Scan(&has_proof) + return has_proof, err +} + const insertAssetSeedling = `-- name: InsertAssetSeedling :exec INSERT INTO asset_seedlings ( asset_name, asset_type, asset_version, asset_supply, asset_meta_id, diff --git a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql new file mode 100644 index 000000000..42bdbfbb8 --- /dev/null +++ b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS federation_proof_sync_log_unique_index_proof_leaf_id_servers_id; +DROP TABLE IF EXISTS federation_proof_sync_log; \ No newline at end of file diff --git a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql new file mode 100644 index 000000000..ba7e7c100 --- /dev/null +++ b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql @@ -0,0 +1,36 @@ +-- This table stores the log of federation universe proof sync attempts. Rows +-- in this table are specific to a given proof leaf, server, and sync direction. +CREATE TABLE IF NOT EXISTS federation_proof_sync_log ( + id BIGINT PRIMARY KEY, + + -- The status of the proof sync attempt. + status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + + -- The timestamp of when the log entry for the associated proof was last + -- updated. 
+ timestamp TIMESTAMP NOT NULL, + + -- The number of attempts that have been made to sync the proof. + attempt_counter BIGINT NOT NULL DEFAULT 0, + + -- The direction of the proof sync attempt. + sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), + + -- The ID of the subject proof leaf. + proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id), + + -- The ID of the universe that the proof leaf belongs to. + universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + + -- The ID of the server that the proof will be/was synced to. + servers_id BIGINT NOT NULL REFERENCES universe_servers(id) +); + +-- Create a unique index on table federation_proof_sync_log +CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id +ON federation_proof_sync_log ( + sync_direction, + proof_leaf_id, + universe_root_id, + servers_id +); \ No newline at end of file diff --git a/tapdb/sqlc/models.go b/tapdb/sqlc/models.go index b89980ab2..26eef9b5d 100644 --- a/tapdb/sqlc/models.go +++ b/tapdb/sqlc/models.go @@ -164,6 +164,17 @@ type FederationGlobalSyncConfig struct { AllowSyncExport bool } +type FederationProofSyncLog struct { + ID int64 + Status string + Timestamp time.Time + AttemptCounter int64 + SyncDirection string + ProofLeafID int64 + UniverseRootID int64 + ServersID int64 +} + type FederationUniSyncConfig struct { Namespace string AssetID []byte diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index ee001f911..71fad7803 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -25,6 +25,7 @@ type Querier interface { DeleteAllNodes(ctx context.Context, namespace string) (int64, error) DeleteAssetWitnesses(ctx context.Context, assetID int64) error DeleteExpiredUTXOLeases(ctx context.Context, now sql.NullTime) error + DeleteFederationProofSyncLog(ctx context.Context, arg DeleteFederationProofSyncLogParams) error DeleteManagedUTXO(ctx context.Context, outpoint []byte) error DeleteNode(ctx context.Context, arg DeleteNodeParams) (int64, error) DeleteRoot(ctx context.Context, namespace string) (int64, error) @@ -78,6 +79,7 @@ type Querier interface { GenesisAssets(ctx context.Context) ([]GenesisAsset, error) GenesisPoints(ctx context.Context) ([]GenesisPoint, error) GetRootKey(ctx context.Context, id []byte) (Macaroon, error) + HasAssetProof(ctx context.Context, tweakedScriptKey []byte) (bool, error) InsertAddr(ctx context.Context, arg InsertAddrParams) (int64, error) InsertAssetSeedling(ctx context.Context, arg InsertAssetSeedlingParams) error InsertAssetSeedlingIntoBatch(ctx context.Context, arg InsertAssetSeedlingIntoBatchParams) error @@ -94,7 +96,6 @@ type Querier interface { InsertPassiveAsset(ctx context.Context, arg InsertPassiveAssetParams) error InsertRootKey(ctx context.Context, arg InsertRootKeyParams) error InsertUniverseServer(ctx context.Context, arg InsertUniverseServerParams) error - ListUniverseServers(ctx context.Context) ([]UniverseServer, error) LogProofTransferAttempt(ctx context.Context, arg LogProofTransferAttemptParams) error LogServerSync(ctx context.Context, arg LogServerSyncParams) error NewMintingBatch(ctx context.Context, arg NewMintingBatchParams) error @@ -122,6 +123,9 @@ type Querier interface { QueryAssets(ctx context.Context, arg QueryAssetsParams) ([]QueryAssetsRow, error) QueryEventIDs(ctx context.Context, arg QueryEventIDsParams) ([]QueryEventIDsRow, error) QueryFederationGlobalSyncConfigs(ctx context.Context) ([]FederationGlobalSyncConfig, error) + // Join on mssmt_nodes to get leaf related fields. 
+ // Join on genesis_info_view to get leaf related fields. + QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error) QueryFederationUniSyncConfigs(ctx context.Context) ([]FederationUniSyncConfig, error) QueryPassiveAssets(ctx context.Context, transferID int64) ([]QueryPassiveAssetsRow, error) QueryProofTransferAttempts(ctx context.Context, arg QueryProofTransferAttemptsParams) ([]time.Time, error) @@ -129,6 +133,7 @@ type Querier interface { // root, simplifies queries QueryUniverseAssetStats(ctx context.Context, arg QueryUniverseAssetStatsParams) ([]QueryUniverseAssetStatsRow, error) QueryUniverseLeaves(ctx context.Context, arg QueryUniverseLeavesParams) ([]QueryUniverseLeavesRow, error) + QueryUniverseServers(ctx context.Context, arg QueryUniverseServersParams) ([]UniverseServer, error) QueryUniverseStats(ctx context.Context) (QueryUniverseStatsRow, error) ReAnchorPassiveAssets(ctx context.Context, arg ReAnchorPassiveAssetsParams) error SetAddrManaged(ctx context.Context, arg SetAddrManagedParams) error @@ -145,6 +150,7 @@ type Querier interface { UpsertAssetProof(ctx context.Context, arg UpsertAssetProofParams) error UpsertChainTx(ctx context.Context, arg UpsertChainTxParams) (int64, error) UpsertFederationGlobalSyncConfig(ctx context.Context, arg UpsertFederationGlobalSyncConfigParams) error + UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFederationProofSyncLogParams) (int64, error) UpsertFederationUniSyncConfig(ctx context.Context, arg UpsertFederationUniSyncConfigParams) error UpsertGenesisAsset(ctx context.Context, arg UpsertGenesisAssetParams) (int64, error) UpsertGenesisPoint(ctx context.Context, prevOut []byte) (int64, error) diff --git a/tapdb/sqlc/queries/assets.sql b/tapdb/sqlc/queries/assets.sql index cc33881e7..84a68ab5d 100644 --- a/tapdb/sqlc/queries/assets.sql +++ b/tapdb/sqlc/queries/assets.sql @@ -690,6 +690,19 @@ FROM asset_proofs JOIN asset_info ON asset_info.asset_id = asset_proofs.asset_id; +-- name: HasAssetProof :one +WITH asset_info AS ( + SELECT assets.asset_id + FROM assets + JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id + WHERE script_keys.tweaked_script_key = $1 +) +SELECT COUNT(asset_info.asset_id) > 0 as has_proof +FROM asset_proofs +JOIN asset_info + ON asset_info.asset_id = asset_proofs.asset_id; + -- name: InsertAssetWitness :exec INSERT INTO asset_witnesses ( asset_id, prev_out_point, prev_asset_id, prev_script_key, witness_stack, diff --git a/tapdb/sqlc/queries/universe.sql b/tapdb/sqlc/queries/universe.sql index cab652573..767c50f25 100644 --- a/tapdb/sqlc/queries/universe.sql +++ b/tapdb/sqlc/queries/universe.sql @@ -115,8 +115,11 @@ UPDATE universe_servers SET last_sync_time = @new_sync_time WHERE server_host = @target_server; --- name: ListUniverseServers :many -SELECT * FROM universe_servers; +-- name: QueryUniverseServers :many +SELECT * FROM universe_servers +WHERE (id = sqlc.narg('id') OR sqlc.narg('id') IS NULL) AND + (server_host = sqlc.narg('server_host') + OR sqlc.narg('server_host') IS NULL); -- name: InsertNewSyncEvent :exec WITH group_key_root_id AS ( @@ -361,4 +364,120 @@ ON CONFLICT(namespace) -- name: QueryFederationUniSyncConfigs :many SELECT namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export FROM federation_uni_sync_config -ORDER BY group_key NULLS LAST, asset_id NULLS LAST, proof_type; \ No newline at end of file +ORDER BY group_key NULLS LAST, asset_id NULLS LAST, proof_type; + +-- name: 
UpsertFederationProofSyncLog :one +INSERT INTO federation_proof_sync_log as log ( + status, timestamp, sync_direction, proof_leaf_id, universe_root_id, + servers_id +) VALUES ( + @status, @timestamp, @sync_direction, + ( + -- Select the leaf id from the universe_leaves table. + SELECT id + FROM universe_leaves + WHERE leaf_node_namespace = @leaf_namespace + AND minting_point = @leaf_minting_point_bytes + AND script_key_bytes = @leaf_script_key_bytes + LIMIT 1 + ), + ( + -- Select the universe root id from the universe_roots table. + SELECT id + FROM universe_roots + WHERE namespace_root = @universe_id_namespace + LIMIT 1 + ), + ( + -- Select the server id from the universe_servers table. + SELECT id + FROM universe_servers + WHERE server_host = @server_host + LIMIT 1 + ) +) ON CONFLICT (sync_direction, proof_leaf_id, universe_root_id, servers_id) +DO UPDATE SET + status = EXCLUDED.status, + timestamp = EXCLUDED.timestamp, + -- Increment the attempt counter. + attempt_counter = CASE + WHEN @bump_sync_attempt_counter = true THEN log.attempt_counter + 1 + ELSE log.attempt_counter + END +RETURNING id; + +-- name: QueryFederationProofSyncLog :many +SELECT + log.id, status, timestamp, sync_direction, attempt_counter, + + -- Select fields from the universe_servers table. + server.id as server_id, + server.server_host, + + -- Select universe leaf related fields. + leaf.minting_point as leaf_minting_point_bytes, + leaf.script_key_bytes as leaf_script_key_bytes, + mssmt_node.value as leaf_genesis_proof, + genesis.gen_asset_id as leaf_gen_asset_id, + genesis.asset_id as leaf_asset_id, + + -- Select fields from the universe_roots table. + root.asset_id as uni_asset_id, + root.group_key as uni_group_key, + root.proof_type as uni_proof_type + +FROM federation_proof_sync_log as log + +JOIN universe_leaves as leaf + ON leaf.id = log.proof_leaf_id + +-- Join on mssmt_nodes to get leaf related fields. +JOIN mssmt_nodes mssmt_node + ON leaf.leaf_node_key = mssmt_node.key AND + leaf.leaf_node_namespace = mssmt_node.namespace + +-- Join on genesis_info_view to get leaf related fields. +JOIN genesis_info_view genesis + ON leaf.asset_genesis_id = genesis.gen_asset_id + +JOIN universe_servers as server + ON server.id = log.servers_id + +JOIN universe_roots as root + ON root.id = log.universe_root_id + +WHERE (log.sync_direction = sqlc.narg('sync_direction') + OR sqlc.narg('sync_direction') IS NULL) + AND + (log.status = sqlc.narg('status') OR sqlc.narg('status') IS NULL) + AND + + -- Universe leaves WHERE clauses. + (leaf.leaf_node_namespace = sqlc.narg('leaf_namespace') + OR sqlc.narg('leaf_namespace') IS NULL) + AND + (leaf.minting_point = sqlc.narg('leaf_minting_point_bytes') + OR sqlc.narg('leaf_minting_point_bytes') IS NULL) + AND + (leaf.script_key_bytes = sqlc.narg('leaf_script_key_bytes') + OR sqlc.narg('leaf_script_key_bytes') IS NULL); + +-- name: DeleteFederationProofSyncLog :exec +WITH selected_server_id AS ( + -- Select the server ids from the universe_servers table for the specified + -- hosts. 
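+    -- A NULL server_host argument matches every server.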
+ SELECT id + FROM universe_servers + WHERE + (server_host = sqlc.narg('server_host') + OR sqlc.narg('server_host') IS NULL) +) +DELETE FROM federation_proof_sync_log +WHERE + servers_id IN (SELECT id FROM selected_server_id) AND + (status = sqlc.narg('status') + OR sqlc.narg('status') IS NULL) AND + (timestamp >= sqlc.narg('min_timestamp') + OR sqlc.narg('min_timestamp') IS NULL) AND + (attempt_counter >= sqlc.narg('min_attempt_counter') + OR sqlc.narg('min_attempt_counter') IS NULL); \ No newline at end of file diff --git a/tapdb/sqlc/universe.sql.go b/tapdb/sqlc/universe.sql.go index 53452cab5..b1e7b33ec 100644 --- a/tapdb/sqlc/universe.sql.go +++ b/tapdb/sqlc/universe.sql.go @@ -11,6 +11,44 @@ import ( "time" ) +const deleteFederationProofSyncLog = `-- name: DeleteFederationProofSyncLog :exec +WITH selected_server_id AS ( + -- Select the server ids from the universe_servers table for the specified + -- hosts. + SELECT id + FROM universe_servers + WHERE + (server_host = $4 + OR $4 IS NULL) +) +DELETE FROM federation_proof_sync_log +WHERE + servers_id IN (SELECT id FROM selected_server_id) AND + (status = $1 + OR $1 IS NULL) AND + (timestamp >= $2 + OR $2 IS NULL) AND + (attempt_counter >= $3 + OR $3 IS NULL) +` + +type DeleteFederationProofSyncLogParams struct { + Status sql.NullString + MinTimestamp sql.NullTime + MinAttemptCounter sql.NullInt64 + ServerHost sql.NullString +} + +func (q *Queries) DeleteFederationProofSyncLog(ctx context.Context, arg DeleteFederationProofSyncLogParams) error { + _, err := q.db.ExecContext(ctx, deleteFederationProofSyncLog, + arg.Status, + arg.MinTimestamp, + arg.MinAttemptCounter, + arg.ServerHost, + ) + return err +} + const deleteUniverseEvents = `-- name: DeleteUniverseEvents :exec WITH root_id AS ( SELECT id @@ -265,33 +303,6 @@ func (q *Queries) InsertUniverseServer(ctx context.Context, arg InsertUniverseSe return err } -const listUniverseServers = `-- name: ListUniverseServers :many -SELECT id, server_host, last_sync_time FROM universe_servers -` - -func (q *Queries) ListUniverseServers(ctx context.Context) ([]UniverseServer, error) { - rows, err := q.db.QueryContext(ctx, listUniverseServers) - if err != nil { - return nil, err - } - defer rows.Close() - var items []UniverseServer - for rows.Next() { - var i UniverseServer - if err := rows.Scan(&i.ID, &i.ServerHost, &i.LastSyncTime); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const logServerSync = `-- name: LogServerSync :exec UPDATE universe_servers SET last_sync_time = $1 @@ -429,6 +440,134 @@ func (q *Queries) QueryFederationGlobalSyncConfigs(ctx context.Context) ([]Feder return items, nil } +const queryFederationProofSyncLog = `-- name: QueryFederationProofSyncLog :many +SELECT + log.id, status, timestamp, sync_direction, attempt_counter, + + -- Select fields from the universe_servers table. + server.id as server_id, + server.server_host, + + -- Select universe leaf related fields. + leaf.minting_point as leaf_minting_point_bytes, + leaf.script_key_bytes as leaf_script_key_bytes, + mssmt_node.value as leaf_genesis_proof, + genesis.gen_asset_id as leaf_gen_asset_id, + genesis.asset_id as leaf_asset_id, + + -- Select fields from the universe_roots table. 
+ root.asset_id as uni_asset_id, + root.group_key as uni_group_key, + root.proof_type as uni_proof_type + +FROM federation_proof_sync_log as log + +JOIN universe_leaves as leaf + ON leaf.id = log.proof_leaf_id + +JOIN mssmt_nodes mssmt_node + ON leaf.leaf_node_key = mssmt_node.key AND + leaf.leaf_node_namespace = mssmt_node.namespace + +JOIN genesis_info_view genesis + ON leaf.asset_genesis_id = genesis.gen_asset_id + +JOIN universe_servers as server + ON server.id = log.servers_id + +JOIN universe_roots as root + ON root.id = log.universe_root_id + +WHERE (log.sync_direction = $1 + OR $1 IS NULL) + AND + (log.status = $2 OR $2 IS NULL) + AND + + -- Universe leaves WHERE clauses. + (leaf.leaf_node_namespace = $3 + OR $3 IS NULL) + AND + (leaf.minting_point = $4 + OR $4 IS NULL) + AND + (leaf.script_key_bytes = $5 + OR $5 IS NULL) +` + +type QueryFederationProofSyncLogParams struct { + SyncDirection sql.NullString + Status sql.NullString + LeafNamespace sql.NullString + LeafMintingPointBytes []byte + LeafScriptKeyBytes []byte +} + +type QueryFederationProofSyncLogRow struct { + ID int64 + Status string + Timestamp time.Time + SyncDirection string + AttemptCounter int64 + ServerID int64 + ServerHost string + LeafMintingPointBytes []byte + LeafScriptKeyBytes []byte + LeafGenesisProof []byte + LeafGenAssetID int64 + LeafAssetID []byte + UniAssetID []byte + UniGroupKey []byte + UniProofType string +} + +// Join on mssmt_nodes to get leaf related fields. +// Join on genesis_info_view to get leaf related fields. +func (q *Queries) QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error) { + rows, err := q.db.QueryContext(ctx, queryFederationProofSyncLog, + arg.SyncDirection, + arg.Status, + arg.LeafNamespace, + arg.LeafMintingPointBytes, + arg.LeafScriptKeyBytes, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []QueryFederationProofSyncLogRow + for rows.Next() { + var i QueryFederationProofSyncLogRow + if err := rows.Scan( + &i.ID, + &i.Status, + &i.Timestamp, + &i.SyncDirection, + &i.AttemptCounter, + &i.ServerID, + &i.ServerHost, + &i.LeafMintingPointBytes, + &i.LeafScriptKeyBytes, + &i.LeafGenesisProof, + &i.LeafGenAssetID, + &i.LeafAssetID, + &i.UniAssetID, + &i.UniGroupKey, + &i.UniProofType, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const queryFederationUniSyncConfigs = `-- name: QueryFederationUniSyncConfigs :many SELECT namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export FROM federation_uni_sync_config @@ -688,6 +827,41 @@ func (q *Queries) QueryUniverseLeaves(ctx context.Context, arg QueryUniverseLeav return items, nil } +const queryUniverseServers = `-- name: QueryUniverseServers :many +SELECT id, server_host, last_sync_time FROM universe_servers +WHERE (id = $1 OR $1 IS NULL) AND + (server_host = $2 + OR $2 IS NULL) +` + +type QueryUniverseServersParams struct { + ID sql.NullInt64 + ServerHost sql.NullString +} + +func (q *Queries) QueryUniverseServers(ctx context.Context, arg QueryUniverseServersParams) ([]UniverseServer, error) { + rows, err := q.db.QueryContext(ctx, queryUniverseServers, arg.ID, arg.ServerHost) + if err != nil { + return nil, err + } + defer rows.Close() + var items []UniverseServer + for rows.Next() { + var i UniverseServer + if err := rows.Scan(&i.ID, 
&i.ServerHost, &i.LastSyncTime); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const queryUniverseStats = `-- name: QueryUniverseStats :one WITH stats AS ( SELECT total_asset_syncs, total_asset_proofs @@ -869,6 +1043,76 @@ func (q *Queries) UpsertFederationGlobalSyncConfig(ctx context.Context, arg Upse return err } +const upsertFederationProofSyncLog = `-- name: UpsertFederationProofSyncLog :one +INSERT INTO federation_proof_sync_log as log ( + status, timestamp, sync_direction, proof_leaf_id, universe_root_id, + servers_id +) VALUES ( + $1, $2, $3, + ( + -- Select the leaf id from the universe_leaves table. + SELECT id + FROM universe_leaves + WHERE leaf_node_namespace = $4 + AND minting_point = $5 + AND script_key_bytes = $6 + LIMIT 1 + ), + ( + -- Select the universe root id from the universe_roots table. + SELECT id + FROM universe_roots + WHERE namespace_root = $7 + LIMIT 1 + ), + ( + -- Select the server id from the universe_servers table. + SELECT id + FROM universe_servers + WHERE server_host = $8 + LIMIT 1 + ) +) ON CONFLICT (sync_direction, proof_leaf_id, universe_root_id, servers_id) +DO UPDATE SET + status = EXCLUDED.status, + timestamp = EXCLUDED.timestamp, + -- Increment the attempt counter. + attempt_counter = CASE + WHEN $9 = true THEN log.attempt_counter + 1 + ELSE log.attempt_counter + END +RETURNING id +` + +type UpsertFederationProofSyncLogParams struct { + Status string + Timestamp time.Time + SyncDirection string + LeafNamespace string + LeafMintingPointBytes []byte + LeafScriptKeyBytes []byte + UniverseIDNamespace string + ServerHost string + BumpSyncAttemptCounter interface{} +} + +func (q *Queries) UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFederationProofSyncLogParams) (int64, error) { + row := q.db.QueryRowContext(ctx, upsertFederationProofSyncLog, + arg.Status, + arg.Timestamp, + arg.SyncDirection, + arg.LeafNamespace, + arg.LeafMintingPointBytes, + arg.LeafScriptKeyBytes, + arg.UniverseIDNamespace, + arg.ServerHost, + arg.BumpSyncAttemptCounter, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + const upsertFederationUniSyncConfig = `-- name: UpsertFederationUniSyncConfig :exec INSERT INTO federation_uni_sync_config ( namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export diff --git a/tapdb/sqlite.go b/tapdb/sqlite.go index 500980fcb..2a3b2d064 100644 --- a/tapdb/sqlite.go +++ b/tapdb/sqlite.go @@ -160,11 +160,11 @@ func NewSqliteStore(cfg *SqliteConfig) (*SqliteStore, error) { func NewTestSqliteDB(t *testing.T) *SqliteStore { t.Helper() - t.Logf("Creating new SQLite DB for testing") + dbFileName := filepath.Join(t.TempDir(), "tmp.db") + t.Logf("Creating new SQLite DB for testing: %s", dbFileName) // TODO(roasbeef): if we pass :memory: for the file name, then we get // an in mem version to speed up tests - dbFileName := filepath.Join(t.TempDir(), "tmp.db") sqlDB, err := NewSqliteStore(&SqliteConfig{ DatabaseFileName: dbFileName, SkipMigrations: false, diff --git a/tapdb/sqlutils_test.go b/tapdb/sqlutils_test.go new file mode 100644 index 000000000..034cf4bcd --- /dev/null +++ b/tapdb/sqlutils_test.go @@ -0,0 +1,284 @@ +package tapdb + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + 
"github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/commitment" + "github.com/lightninglabs/taproot-assets/internal/test" + "github.com/lightninglabs/taproot-assets/proof" + "github.com/lightninglabs/taproot-assets/tapdb/sqlc" + "github.com/lightninglabs/taproot-assets/universe" + "github.com/lightningnetwork/lnd/clock" + "github.com/stretchr/testify/require" +) + +// DbHandler is a helper struct that contains all the database stores. +type DbHandler struct { + // UniverseFederationStore is a handle to the universe federation store. + UniverseFederationStore *UniverseFederationDB + + // MultiverseStore is a handle to the multiverse store. + MultiverseStore *MultiverseStore + + // AssetMintingStore is a handle to the pending (minting) assets store. + AssetMintingStore *AssetMintingStore + + // AssetStore is a handle to the active assets store. + AssetStore *AssetStore + + // DirectQuery is a handle to the underlying database that can be used + // to query the database directly. + DirectQuery sqlc.Querier +} + +// AddRandomAssetProof generates a random asset and corresponding proof and +// inserts them into the given test database. +func (d *DbHandler) AddRandomAssetProof(t *testing.T) (*asset.Asset, + *proof.AnnotatedProof) { + + var ( + ctx = context.Background() + + assetStore = d.AssetStore + db = d.DirectQuery + ) + + // Next, we'll make a new random asset that also has a few inputs with + // dummy witness information. + testAsset := randAsset(t) + + assetRoot, err := commitment.NewAssetCommitment(testAsset) + require.NoError(t, err) + + taprootAssetRoot, err := commitment.NewTapCommitment(assetRoot) + require.NoError(t, err) + + // With our asset created, we can now create the AnnotatedProof we use + // to import assets into the database. + var blockHash chainhash.Hash + _, err = rand.Read(blockHash[:]) + require.NoError(t, err) + + anchorTx := wire.NewMsgTx(2) + anchorTx.AddTxIn(&wire.TxIn{}) + anchorTx.AddTxOut(&wire.TxOut{ + PkScript: bytes.Repeat([]byte{0x01}, 34), + Value: 10, + }) + + assetID := testAsset.ID() + anchorPoint := wire.OutPoint{ + Hash: anchorTx.TxHash(), + Index: 0, + } + + // Generate a random proof and encode it into a proof blob. + testProof := randProof(t, testAsset) + + var proofBlobBuffer bytes.Buffer + err = testProof.Encode(&proofBlobBuffer) + require.NoError(t, err) + + proofBlob := proofBlobBuffer.Bytes() + scriptKey := testAsset.ScriptKey + + annotatedProof := &proof.AnnotatedProof{ + Locator: proof.Locator{ + AssetID: &assetID, + ScriptKey: *scriptKey.PubKey, + }, + Blob: proofBlob, + AssetSnapshot: &proof.AssetSnapshot{ + Asset: testAsset, + OutPoint: anchorPoint, + AnchorBlockHash: blockHash, + AnchorBlockHeight: test.RandInt[uint32](), + AnchorTxIndex: test.RandInt[uint32](), + AnchorTx: anchorTx, + OutputIndex: 0, + InternalKey: test.RandPubKey(t), + ScriptRoot: taprootAssetRoot, + }, + } + if testAsset.GroupKey != nil { + annotatedProof.GroupKey = &testAsset.GroupKey.GroupPubKey + } + + // We'll now insert the internal key information as well as the script + // key ahead of time to reflect the address creation that happens + // elsewhere. 
+ _, err = db.UpsertInternalKey(ctx, InternalKey{ + RawKey: annotatedProof.InternalKey.SerializeCompressed(), + KeyFamily: test.RandInt[int32](), + KeyIndex: test.RandInt[int32](), + }) + require.NoError(t, err) + rawScriptKeyID, err := db.UpsertInternalKey(ctx, InternalKey{ + RawKey: scriptKey.RawKey.PubKey.SerializeCompressed(), + KeyFamily: int32(testAsset.ScriptKey.RawKey.Family), + KeyIndex: int32(testAsset.ScriptKey.RawKey.Index), + }) + require.NoError(t, err) + _, err = db.UpsertScriptKey(ctx, NewScriptKey{ + InternalKeyID: rawScriptKeyID, + TweakedScriptKey: scriptKey.PubKey.SerializeCompressed(), + Tweak: nil, + }) + require.NoError(t, err) + + // We'll add the chain transaction of the proof now to simulate a + // batched transfer on a higher layer. + var anchorTxBuf bytes.Buffer + err = annotatedProof.AnchorTx.Serialize(&anchorTxBuf) + require.NoError(t, err) + anchorTXID := annotatedProof.AnchorTx.TxHash() + _, err = db.UpsertChainTx(ctx, ChainTxParams{ + Txid: anchorTXID[:], + RawTx: anchorTxBuf.Bytes(), + BlockHeight: sqlInt32(annotatedProof.AnchorBlockHeight), + BlockHash: annotatedProof.AnchorBlockHash[:], + TxIndex: sqlInt32(annotatedProof.AnchorTxIndex), + }) + require.NoError(t, err, "unable to insert chain tx: %w", err) + + // Before we insert the proof, we expect our backend to report it as not + // found. + proofLocator := proof.Locator{ + ScriptKey: *testAsset.ScriptKey.PubKey, + } + found, err := assetStore.HasProof(ctx, proofLocator) + require.NoError(t, err) + require.False(t, found) + + // With all our test data constructed, we'll now attempt to import the + // asset into the database. + require.NoError(t, assetStore.ImportProofs( + ctx, proof.MockHeaderVerifier, proof.MockGroupVerifier, false, + annotatedProof, + )) + + // Now the HasProof should return true. + found, err = assetStore.HasProof(ctx, proofLocator) + require.NoError(t, err) + require.True(t, found) + + return testAsset, annotatedProof +} + +// AddUniProofLeaf generates a universe proof leaf and inserts it into the test +// database. +func (d *DbHandler) AddUniProofLeaf(t *testing.T, testAsset *asset.Asset, + annotatedProof *proof.AnnotatedProof) *universe.Proof { + + ctx := context.Background() + + // Insert proof into the multiverse/universe store. This step will + // populate the universe root and universe leaves tables. + uniId := universe.NewUniIDFromAsset(*testAsset) + + leafKey := universe.LeafKey{ + OutPoint: annotatedProof.AssetSnapshot.OutPoint, + ScriptKey: &testAsset.ScriptKey, + } + + leaf := universe.Leaf{ + GenesisWithGroup: universe.GenesisWithGroup{ + Genesis: testAsset.Genesis, + GroupKey: testAsset.GroupKey, + }, + RawProof: annotatedProof.Blob, + Asset: testAsset, + Amt: testAsset.Amount, + } + + uniProof, err := d.MultiverseStore.UpsertProofLeaf( + ctx, uniId, leafKey, &leaf, nil, + ) + require.NoError(t, err) + + return uniProof +} + +// AddRandomServerAddrs is a helper function that will create server addresses +// and add them to the database. +func (d *DbHandler) AddRandomServerAddrs(t *testing.T, + numServers int) []universe.ServerAddr { + + var ( + ctx = context.Background() + fedDB = d.UniverseFederationStore + ) + + addrs := make([]universe.ServerAddr, 0, numServers) + for i := 0; i < numServers; i++ { + portOffset := i + 10_000 + hostStr := fmt.Sprintf("localhost:%v", portOffset) + + addr := universe.NewServerAddr(int64(i+1), hostStr) + addrs = append(addrs, addr) + } + + // With the set of addrs created, we'll now insert them all into the + // database. 
+ err := fedDB.AddServers(ctx, addrs...) + require.NoError(t, err) + + return addrs +} + +// NewDbHandle creates a new store and query handle to the test database. +func NewDbHandle(t *testing.T) *DbHandler { + // Create a new test database. + db := NewTestDB(t) + + testClock := clock.NewTestClock(time.Now()) + + // Gain a handle to the pending (minting) universe federation store. + universeServerTxCreator := NewTransactionExecutor( + db, func(tx *sql.Tx) UniverseServerStore { + return db.WithTx(tx) + }, + ) + fedStore := NewUniverseFederationDB(universeServerTxCreator, testClock) + + // Gain a handle to the multiverse store. + multiverseTxCreator := NewTransactionExecutor(db, + func(tx *sql.Tx) BaseMultiverseStore { + return db.WithTx(tx) + }, + ) + multiverseStore := NewMultiverseStore(multiverseTxCreator) + + // Gain a handle to the pending (minting) assets store. + assetMintingDB := NewTransactionExecutor( + db, func(tx *sql.Tx) PendingAssetStore { + return db.WithTx(tx) + }, + ) + assetMintingStore := NewAssetMintingStore(assetMintingDB) + + // Gain a handle to the active assets store. + assetsDB := NewTransactionExecutor( + db, func(tx *sql.Tx) ActiveAssetsStore { + return db.WithTx(tx) + }, + ) + activeAssetsStore := NewAssetStore(assetsDB, testClock) + + return &DbHandler{ + UniverseFederationStore: fedStore, + MultiverseStore: multiverseStore, + AssetMintingStore: assetMintingStore, + AssetStore: activeAssetsStore, + DirectQuery: db, + } +} diff --git a/tapdb/universe_federation.go b/tapdb/universe_federation.go index 5052ac1a0..c6bbea8a5 100644 --- a/tapdb/universe_federation.go +++ b/tapdb/universe_federation.go @@ -1,7 +1,9 @@ package tapdb import ( + "bytes" "context" + "database/sql" "errors" "fmt" "sort" @@ -9,8 +11,11 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" + "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightningnetwork/lnd/clock" @@ -19,6 +24,20 @@ import ( ) type ( + // UpsertFedProofSyncLogParams is used to upsert federation proof sync + // logs. + UpsertFedProofSyncLogParams = sqlc.UpsertFederationProofSyncLogParams + + // QueryFedProofSyncLogParams is used to query for federation proof sync + // logs. + QueryFedProofSyncLogParams = sqlc.QueryFederationProofSyncLogParams + + // DeleteFedProofSyncLogParams is used to delete proof sync log entries. + DeleteFedProofSyncLogParams = sqlc.DeleteFederationProofSyncLogParams + + // ProofSyncLogEntry is a single entry from the proof sync log. + ProofSyncLogEntry = sqlc.QueryFederationProofSyncLogRow + // NewUniverseServer is used to create a new universe server. NewUniverseServer = sqlc.InsertUniverseServerParams @@ -40,6 +59,9 @@ type ( // FedUniSyncConfigs is the universe specific federation sync config // returned from a query. FedUniSyncConfigs = sqlc.FederationUniSyncConfig + + // QueryUniServersParams is used to query for universe servers. + QueryUniServersParams = sqlc.QueryUniverseServersParams ) var ( @@ -59,6 +81,26 @@ var ( } ) +// FederationProofSyncLogStore is used to log the sync status of individual +// universe proofs. +type FederationProofSyncLogStore interface { + BaseUniverseStore + + // UpsertFederationProofSyncLog upserts a proof sync log entry for a + // given proof leaf and server. 
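+	// The returned int64 is the primary key of the upserted log row.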
+ UpsertFederationProofSyncLog(ctx context.Context, + arg UpsertFedProofSyncLogParams) (int64, error) + + // QueryFederationProofSyncLog returns the set of proof sync logs for a + // given proof leaf. + QueryFederationProofSyncLog(ctx context.Context, + arg QueryFedProofSyncLogParams) ([]ProofSyncLogEntry, error) + + // DeleteFederationProofSyncLog deletes proof sync log entries. + DeleteFederationProofSyncLog(ctx context.Context, + arg DeleteFedProofSyncLogParams) error +} + // FederationSyncConfigStore is used to manage the set of Universe servers as // part of a federation. type FederationSyncConfigStore interface { @@ -87,6 +129,7 @@ type FederationSyncConfigStore interface { // of a federation. type UniverseServerStore interface { FederationSyncConfigStore + FederationProofSyncLogStore // InsertUniverseServer inserts a new universe server in to the DB. InsertUniverseServer(ctx context.Context, arg NewUniverseServer) error @@ -97,8 +140,10 @@ type UniverseServerStore interface { // LogServerSync marks that a server was just synced in the DB. LogServerSync(ctx context.Context, arg sqlc.LogServerSyncParams) error - // ListUniverseServers returns the total set of all universe servers. - ListUniverseServers(ctx context.Context) ([]sqlc.UniverseServer, error) + // QueryUniverseServers returns a set of universe servers. + QueryUniverseServers(ctx context.Context, + arg sqlc.QueryUniverseServersParams) ([]sqlc.UniverseServer, + error) } // UniverseFederationOptions is the database tx object for the universe server store. @@ -174,7 +219,9 @@ func (u *UniverseFederationDB) UniverseServers( readTx := NewUniverseFederationReadTx() dbErr := u.db.ExecTx(ctx, &readTx, func(db UniverseServerStore) error { - servers, err := db.ListUniverseServers(ctx) + servers, err := db.QueryUniverseServers( + ctx, QueryUniServersParams{}, + ) if err != nil { return err } @@ -261,6 +308,301 @@ func (u *UniverseFederationDB) LogNewSyncs(ctx context.Context, }) } +// UpsertFederationProofSyncLog upserts a federation proof sync log entry for a +// given universe server and proof. +func (u *UniverseFederationDB) UpsertFederationProofSyncLog( + ctx context.Context, uniID universe.Identifier, + leafKey universe.LeafKey, addr universe.ServerAddr, + syncDirection universe.SyncDirection, + syncStatus universe.ProofSyncStatus, + bumpSyncAttemptCounter bool) (int64, error) { + + // Encode the leaf key outpoint as bytes. We'll use this to look up the + // leaf ID in the DB. + leafKeyOutpointBytes, err := encodeOutpoint(leafKey.OutPoint) + if err != nil { + return 0, err + } + + // Encode the leaf script key pub key as bytes. We'll use this to look + // up the leaf ID in the DB. 
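+	// The schnorr serialization yields a 32-byte x-only public key, which
+	// matches how universe leaves store script keys.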
+ scriptKeyPubKeyBytes := schnorr.SerializePubKey( + leafKey.ScriptKey.PubKey, + ) + + var ( + writeTx UniverseFederationOptions + logID int64 + ) + + err = u.db.ExecTx(ctx, &writeTx, func(db UniverseServerStore) error { + params := UpsertFedProofSyncLogParams{ + Status: string(syncStatus), + Timestamp: time.Now().UTC(), + SyncDirection: string(syncDirection), + UniverseIDNamespace: uniID.String(), + LeafNamespace: uniID.String(), + LeafMintingPointBytes: leafKeyOutpointBytes, + LeafScriptKeyBytes: scriptKeyPubKeyBytes, + ServerHost: addr.HostStr(), + BumpSyncAttemptCounter: bumpSyncAttemptCounter, + } + logID, err = db.UpsertFederationProofSyncLog(ctx, params) + if err != nil { + return err + } + + return nil + }) + + return logID, err +} + +// QueryFederationProofSyncLog queries the federation proof sync log and returns +// the log entries which correspond to the given universe proof leaf. +func (u *UniverseFederationDB) QueryFederationProofSyncLog( + ctx context.Context, uniID universe.Identifier, + leafKey universe.LeafKey, + syncDirection universe.SyncDirection, + syncStatus universe.ProofSyncStatus) ([]*universe.ProofSyncLogEntry, + error) { + + // Encode the leaf key outpoint as bytes. We'll use this to look up the + // leaf ID in the DB. + leafKeyOutpointBytes, err := encodeOutpoint(leafKey.OutPoint) + if err != nil { + return nil, err + } + + // Encode the leaf script key pub key as bytes. We'll use this to look + // up the leaf ID in the DB. + scriptKeyPubKeyBytes := schnorr.SerializePubKey( + leafKey.ScriptKey.PubKey, + ) + + var ( + readTx = NewUniverseFederationReadTx() + proofSyncLogs []*universe.ProofSyncLogEntry + ) + + err = u.db.ExecTx(ctx, &readTx, func(db UniverseServerStore) error { + params := QueryFedProofSyncLogParams{ + SyncDirection: sqlStr(string(syncDirection)), + Status: sqlStr(string(syncStatus)), + LeafNamespace: sqlStr(uniID.String()), + LeafMintingPointBytes: leafKeyOutpointBytes, + LeafScriptKeyBytes: scriptKeyPubKeyBytes, + } + logEntries, err := db.QueryFederationProofSyncLog(ctx, params) + + // Parse database proof sync logs. Multiple log entries may + // exist for a given leaf because each log entry is unique to a + // server. + proofSyncLogs = make( + []*universe.ProofSyncLogEntry, 0, len(logEntries), + ) + for idx := range logEntries { + entry := logEntries[idx] + + parsedLogEntry, err := fetchProofSyncLogEntry( + ctx, entry, db, + ) + if err != nil { + return err + } + + proofSyncLogs = append(proofSyncLogs, parsedLogEntry) + } + + return err + }) + if err != nil { + return nil, err + } + + return proofSyncLogs, nil +} + +// FetchPendingProofsSyncLog queries the federation proof sync log and returns +// all log entries with sync status pending. +func (u *UniverseFederationDB) FetchPendingProofsSyncLog(ctx context.Context, + syncDirection *universe.SyncDirection) ([]*universe.ProofSyncLogEntry, + error) { + + var ( + readTx = NewUniverseFederationReadTx() + proofSyncLogs []*universe.ProofSyncLogEntry + ) + + err := u.db.ExecTx(ctx, &readTx, func(db UniverseServerStore) error { + // If the sync direction is not set, then we'll query for all + // pending proof sync log entries. 
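+		// Otherwise, the query below is restricted to that direction.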
+ var sqlSyncDirection sql.NullString + if syncDirection != nil { + sqlSyncDirection = sqlStr(string(*syncDirection)) + } + + sqlProofSyncStatus := sqlStr( + string(universe.ProofSyncStatusPending), + ) + + params := QueryFedProofSyncLogParams{ + SyncDirection: sqlSyncDirection, + Status: sqlProofSyncStatus, + } + logEntries, err := db.QueryFederationProofSyncLog(ctx, params) + if err != nil { + return fmt.Errorf("unable to query proof sync log: %w", + err) + } + + // Parse log entries from database row. + proofSyncLogs = make( + []*universe.ProofSyncLogEntry, 0, len(logEntries), + ) + for idx := range logEntries { + entry := logEntries[idx] + + parsedLogEntry, err := fetchProofSyncLogEntry( + ctx, entry, db, + ) + if err != nil { + return err + } + + proofSyncLogs = append(proofSyncLogs, parsedLogEntry) + } + + return nil + }) + if err != nil { + return nil, err + } + + return proofSyncLogs, nil +} + +// fetchProofSyncLogEntry returns a proof sync log entry given a DB row. +func fetchProofSyncLogEntry(ctx context.Context, entry ProofSyncLogEntry, + dbTx UniverseServerStore) (*universe.ProofSyncLogEntry, error) { + + // Fetch asset genesis for the leaf. + leafAssetGen, err := fetchGenesis(ctx, dbTx, entry.LeafGenAssetID) + if err != nil { + return nil, err + } + + // We only need to obtain the asset at this point, so we'll do a sparse + // decode here to decode only the asset record. + var leafAsset asset.Asset + assetRecord := proof.AssetLeafRecord(&leafAsset) + err = proof.SparseDecode( + bytes.NewReader(entry.LeafGenesisProof), assetRecord, + ) + if err != nil { + return nil, fmt.Errorf("unable to decode proof: %w", err) + } + + leaf := &universe.Leaf{ + GenesisWithGroup: universe.GenesisWithGroup{ + Genesis: leafAssetGen, + GroupKey: leafAsset.GroupKey, + }, + RawProof: entry.LeafGenesisProof, + Asset: &leafAsset, + Amt: leafAsset.Amount, + } + + // Parse leaf key from leaf DB row. + scriptKeyPub, err := schnorr.ParsePubKey( + entry.LeafScriptKeyBytes, + ) + if err != nil { + return nil, err + } + scriptKey := asset.NewScriptKey(scriptKeyPub) + + var outPoint wire.OutPoint + err = readOutPoint( + bytes.NewReader(entry.LeafMintingPointBytes), 0, 0, + &outPoint, + ) + if err != nil { + return nil, err + } + + leafKey := universe.LeafKey{ + OutPoint: outPoint, + ScriptKey: &scriptKey, + } + + // Parse server address from DB row. + serverAddr := universe.NewServerAddr(entry.ServerID, entry.ServerHost) + + // Parse proof sync status directly from the DB row. + status, err := universe.ParseStrProofSyncStatus(entry.Status) + if err != nil { + return nil, err + } + + // Parse proof sync direction directly from the DB row. + direction, err := universe.ParseStrSyncDirection(entry.SyncDirection) + if err != nil { + return nil, err + } + + uniID, err := universe.NewUniIDFromRawArgs( + entry.UniAssetID, entry.UniGroupKey, + entry.UniProofType, + ) + if err != nil { + return nil, err + } + + return &universe.ProofSyncLogEntry{ + Timestamp: entry.Timestamp, + SyncStatus: status, + SyncDirection: direction, + AttemptCounter: entry.AttemptCounter, + ServerAddr: serverAddr, + + UniID: uniID, + LeafKey: leafKey, + Leaf: *leaf, + }, nil +} + +// DeleteProofsSyncLogEntries deletes a set of proof sync log entries. 
+func (u *UniverseFederationDB) DeleteProofsSyncLogEntries(ctx context.Context, + servers ...universe.ServerAddr) error { + + var writeTx UniverseFederationOptions + + err := u.db.ExecTx(ctx, &writeTx, func(db UniverseServerStore) error { + // Delete proof sync log entries which are associated with each + // server. + for i := range servers { + server := servers[i] + + err := db.DeleteFederationProofSyncLog( + ctx, DeleteFedProofSyncLogParams{ + ServerHost: sqlStr(server.HostStr()), + }, + ) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + return nil +} + // UpsertFederationSyncConfig upserts both the global and universe specific // federation sync configs. func (u *UniverseFederationDB) UpsertFederationSyncConfig( diff --git a/tapdb/universe_federation_test.go b/tapdb/universe_federation_test.go index b7fb80a21..7492a2c61 100644 --- a/tapdb/universe_federation_test.go +++ b/tapdb/universe_federation_test.go @@ -3,7 +3,6 @@ package tapdb import ( "context" "database/sql" - "fmt" "testing" "time" @@ -35,10 +34,12 @@ func newTestFederationDb(t *testing.T, func TestUniverseFederationCRUD(t *testing.T) { t.Parallel() - testClock := clock.NewTestClock(time.Now()) - fedDB, _ := newTestFederationDb(t, testClock) + var ( + ctx = context.Background() - ctx := context.Background() + db = NewDbHandle(t) + fedDB = db.UniverseFederationStore + ) // If we try to list the set of servers without any added, we should // get the error we expect. @@ -47,19 +48,7 @@ func TestUniverseFederationCRUD(t *testing.T) { require.Empty(t, dbServers) // Next, we'll try to add a new series of servers to the DB. - const numServers = 10 - addrs := make([]universe.ServerAddr, 0, numServers) - for i := int64(0); i < numServers; i++ { - portOffset := i + 10_000 - hostStr := fmt.Sprintf("localhost:%v", portOffset) - - addrs = append(addrs, universe.NewServerAddr(i+1, hostStr)) - } - - // With the set of addrs created, we'll now insert them all into the - // database. - err = fedDB.AddServers(ctx, addrs...) - require.NoError(t, err) + addrs := db.AddRandomServerAddrs(t, 10) // If we try to insert them all again, then we should get an error as // we ensure the host names are unique. @@ -98,6 +87,284 @@ func TestUniverseFederationCRUD(t *testing.T) { require.NoError(t, err) } +// TestFederationProofSyncLogCRUD tests that we can add, modify, and remove +// proof sync log entries from the Universe DB. +func TestFederationProofSyncLogCRUD(t *testing.T) { + t.Parallel() + + var ( + ctx = context.Background() + dbHandle = NewDbHandle(t) + fedStore = dbHandle.UniverseFederationStore + ) + + // Populate the database with a random asset, its associated proof, and + // a set of servers. + testAsset, testAnnotatedProof := dbHandle.AddRandomAssetProof(t) + uniProof := dbHandle.AddUniProofLeaf(t, testAsset, testAnnotatedProof) + uniId := universe.NewUniIDFromAsset(*testAsset) + + servers := dbHandle.AddRandomServerAddrs(t, 3) + + // Designate pending sync status for all servers except the first. + // Make a map set of pending sync servers. + pendingSyncServers := make(map[universe.ServerAddr]struct{}) + for i := range servers { + server := servers[i] + if i == 0 { + continue + } + pendingSyncServers[server] = struct{}{} + } + + // Add log entries for the first server. + syncServer := servers[0] + + // Add push log entry. 
+	_, err := fedStore.UpsertFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, syncServer,
+		universe.SyncDirectionPush, universe.ProofSyncStatusComplete,
+		true,
+	)
+	require.NoError(t, err)
+
+	// Add pull log entry.
+	_, err = fedStore.UpsertFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, syncServer,
+		universe.SyncDirectionPull, universe.ProofSyncStatusComplete,
+		true,
+	)
+	require.NoError(t, err)
+
+	// We've already added log entries for the first server. We will now
+	// insert new proof sync log entries for the remaining servers.
+	for _, server := range servers[1:] {
+		_, err := fedStore.UpsertFederationProofSyncLog(
+			ctx, uniId, uniProof.LeafKey, server,
+			universe.SyncDirectionPush,
+			universe.ProofSyncStatusPending, false,
+		)
+		require.NoError(t, err)
+	}
+
+	// Retrieve all sync status pending log entries.
+	syncDirectionPush := universe.SyncDirectionPush
+	pendingLogEntries, err := fedStore.FetchPendingProofsSyncLog(
+		ctx, &syncDirectionPush,
+	)
+	require.NoError(t, err)
+	require.Len(t, pendingLogEntries, 2)
+
+	for i := range pendingLogEntries {
+		entry := pendingLogEntries[i]
+		require.Equal(
+			t, universe.ProofSyncStatusPending, entry.SyncStatus,
+		)
+		require.Equal(
+			t, universe.SyncDirectionPush, entry.SyncDirection,
+		)
+		require.Equal(t, uniId.String(), entry.UniID.String())
+		require.Equal(t, int64(0), entry.AttemptCounter)
+
+		assertProofSyncLogLeafKey(t, uniProof.LeafKey, entry.LeafKey)
+		assertProofSyncLogLeaf(t, *uniProof.Leaf, entry.Leaf)
+
+		// Check for server address in pending sync server set.
+		_, ok := pendingSyncServers[entry.ServerAddr]
+		require.True(t, ok)
+	}
+
+	// Retrieve all push sync status complete log entries.
+	completePushLogEntries, err := fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusComplete,
+	)
+	require.NoError(t, err)
+
+	// There should only be one complete push log entry.
+	require.Len(t, completePushLogEntries, 1)
+
+	// Check that the complete push log entry is as expected.
+	completePushEntry := completePushLogEntries[0]
+
+	require.Equal(t, servers[0], completePushEntry.ServerAddr)
+	require.Equal(
+		t, universe.ProofSyncStatusComplete,
+		completePushEntry.SyncStatus,
+	)
+	require.Equal(
+		t, universe.SyncDirectionPush, completePushEntry.SyncDirection,
+	)
+	require.Equal(t, uniId.String(), completePushEntry.UniID.String())
+	require.Equal(t, int64(0), completePushEntry.AttemptCounter)
+
+	assertProofSyncLogLeafKey(
+		t, uniProof.LeafKey, completePushEntry.LeafKey,
+	)
+	assertProofSyncLogLeaf(t, *uniProof.Leaf, completePushEntry.Leaf)
+
+	// Retrieve all pull sync status complete log entries.
+	completePullLogEntries, err := fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPull,
+		universe.ProofSyncStatusComplete,
+	)
+	require.NoError(t, err)
+
+	// There should only be one complete pull log entry.
+	require.Len(t, completePullLogEntries, 1)
+
+	// Check that the complete pull log entry is as expected.
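+	// It should reference the first server and the pull direction.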
+ completePullEntry := completePullLogEntries[0] + + require.Equal(t, servers[0], completePullEntry.ServerAddr) + require.Equal( + t, universe.ProofSyncStatusComplete, + completePullEntry.SyncStatus, + ) + require.Equal( + t, universe.SyncDirectionPull, completePullEntry.SyncDirection, + ) + require.Equal(t, uniId.String(), completePullEntry.UniID.String()) + require.Equal(t, int64(0), completePullEntry.AttemptCounter) + + assertProofSyncLogLeafKey( + t, uniProof.LeafKey, completePullEntry.LeafKey, + ) + assertProofSyncLogLeaf(t, *uniProof.Leaf, completePullEntry.Leaf) + + // Increment the attempt counter for one of the pending log entries. + _, err = fedStore.UpsertFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, servers[1], + universe.SyncDirectionPush, universe.ProofSyncStatusPending, + true, + ) + require.NoError(t, err) + + // Check that the attempt counter was incremented as expected. + pendingLogEntries, err = fedStore.QueryFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush, + universe.ProofSyncStatusPending, + ) + require.NoError(t, err) + require.Len(t, pendingLogEntries, 2) + + for i := range pendingLogEntries { + entry := pendingLogEntries[i] + if entry.ServerAddr == servers[1] { + require.Equal(t, int64(1), entry.AttemptCounter) + } else { + require.Equal(t, int64(0), entry.AttemptCounter) + } + } + + // Upsert without incrementing the attempt counter for one of the + // pending log entries. + _, err = fedStore.UpsertFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, servers[1], + universe.SyncDirectionPush, universe.ProofSyncStatusPending, + false, + ) + require.NoError(t, err) + + // Check that the attempt counter was not changed as expected. + pendingLogEntries, err = fedStore.QueryFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush, + universe.ProofSyncStatusPending, + ) + require.NoError(t, err) + require.Len(t, pendingLogEntries, 2) + + for i := range pendingLogEntries { + entry := pendingLogEntries[i] + if entry.ServerAddr == servers[1] { + require.Equal(t, int64(1), entry.AttemptCounter) + } else { + require.Equal(t, int64(0), entry.AttemptCounter) + } + } + + // Set the sync status to complete for one of the pending log entries. + _, err = fedStore.UpsertFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, servers[1], + universe.SyncDirectionPush, universe.ProofSyncStatusComplete, + false, + ) + require.NoError(t, err) + + // Check that the sync status was updated as expected. + pendingLogEntries, err = fedStore.QueryFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush, + universe.ProofSyncStatusPending, + ) + require.NoError(t, err) + require.Len(t, pendingLogEntries, 1) + + completePushLogEntries, err = fedStore.QueryFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush, + universe.ProofSyncStatusComplete, + ) + require.NoError(t, err) + require.Len(t, completePushLogEntries, 2) + + // Delete log entries for one of the servers. + err = fedStore.DeleteProofsSyncLogEntries(ctx, servers[0], servers[1]) + require.NoError(t, err) + + // Only one log entry should remain and it should have sync status + // pending. + pendingLogEntries, err = fedStore.QueryFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush, + universe.ProofSyncStatusPending, + ) + require.NoError(t, err) + require.Len(t, pendingLogEntries, 1) + + // Check that the remaining log entry is as expected. 
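+	// It should belong to the third server, as the entries for the first
+	// two servers were just deleted.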
+	pendingEntry := pendingLogEntries[0]
+	require.Equal(t, servers[2], pendingEntry.ServerAddr)
+}
+
+// assertProofSyncLogLeafKey asserts that a leaf key derived from a proof sync
+// log entry is equal to a given leaf key.
+func assertProofSyncLogLeafKey(t *testing.T, actualLeafKey universe.LeafKey,
+	logLeafKey universe.LeafKey) {
+
+	// We can safely ignore the tweaked script key as it is the derivation
+	// information for the script key. It is only ever known to the owner of
+	// the asset and is never serialized in a proof.
+	actualLeafKey.ScriptKey.TweakedScriptKey = nil
+	require.Equal(t, actualLeafKey, logLeafKey)
+}
+
+// assertProofSyncLogLeaf asserts that a leaf derived from a proof sync log
+// entry is equal to a given universe leaf.
+func assertProofSyncLogLeaf(t *testing.T, actualLeaf universe.Leaf,
+	logLeaf universe.Leaf) {
+
+	if actualLeaf.GenesisWithGroup.GroupKey != nil {
+		// We can safely ignore the group key witness as it is
+		// basically just extracted from the asset and won't be relevant
+		// when parsing the proof.
+		actualLeaf.GenesisWithGroup.GroupKey.Witness = nil
+
+		// We can safely ignore the pre-tweaked group key
+		// (GroupKey.RawKey) as it is the derivation information for the
+		// group key. It is only ever known to the owner of the asset
+		// and is never serialized in a proof.
+		actualLeaf.GenesisWithGroup.GroupKey.RawKey.PubKey = nil
+	}
+
+	require.Equal(t, actualLeaf.Amt, logLeaf.Amt)
+	require.Equal(t, actualLeaf.RawProof, logLeaf.RawProof)
+	require.Equal(t, actualLeaf.GenesisWithGroup, logLeaf.GenesisWithGroup)
+
+	// We compare the assets with our custom asset equality function as the
+	// SplitCommitmentRoot field MS-SMT node types will differ. A computed
+	// node is derived from the database data whereas the generated asset
+	// may have a MS-SMT branch node type.
+	require.True(t, actualLeaf.Asset.DeepEqual(logLeaf.Asset))
+}
+
 // TestFederationConfigDefault tests that we're able to fetch the default
 // federation config.
func TestFederationConfigDefault(t *testing.T) { diff --git a/tapdb/universe_test.go b/tapdb/universe_test.go index c5803e02b..b82223e40 100644 --- a/tapdb/universe_test.go +++ b/tapdb/universe_test.go @@ -129,7 +129,12 @@ func randLeafKey(t *testing.T) universe.LeafKey { } } -func randProof(t *testing.T) *proof.Proof { +func randProof(t *testing.T, argAsset *asset.Asset) *proof.Proof { + proofAsset := *asset.RandAsset(t, asset.Normal) + if argAsset != nil { + proofAsset = *argAsset + } + return &proof.Proof{ PrevOut: wire.OutPoint{}, BlockHeader: wire.BlockHeader{ @@ -142,7 +147,7 @@ func randProof(t *testing.T) *proof.Proof { }}, }, TxMerkleProof: proof.TxMerkleProof{}, - Asset: *asset.RandAsset(t, asset.Normal), + Asset: proofAsset, InclusionProof: proof.TaprootProof{ InternalKey: test.RandPubKey(t), }, @@ -152,7 +157,7 @@ func randProof(t *testing.T) *proof.Proof { func randMintingLeaf(t *testing.T, assetGen asset.Genesis, groupKey *btcec.PublicKey) universe.Leaf { - randProof := randProof(t) + randProof := randProof(t, nil) leaf := universe.Leaf{ GenesisWithGroup: universe.GenesisWithGroup{ @@ -320,7 +325,7 @@ func TestUniverseIssuanceProofs(t *testing.T) { testLeaf := &testLeaves[idx] var proofBuf bytes.Buffer - randProof := randProof(t) + randProof := randProof(t, nil) require.NoError(t, randProof.Encode(&proofBuf)) testLeaf.Leaf.RawProof = proofBuf.Bytes() diff --git a/tapfreighter/chain_porter.go b/tapfreighter/chain_porter.go index e5953c99f..d345cf2c1 100644 --- a/tapfreighter/chain_porter.go +++ b/tapfreighter/chain_porter.go @@ -60,9 +60,10 @@ type ChainPorterConfig struct { // TODO(roasbeef): replace with proof.Courier in the future/ AssetProofs proof.Archiver - // ProofCourierCfg is a general config applicable to all proof courier - // service handles. - ProofCourierCfg *proof.CourierCfg + // ProofCourierDispatcher is the dispatcher that is used to create new + // proof courier handles for sending proofs based on the protocol of + // a proof courier address. + ProofCourierDispatcher proof.CourierDispatch // ProofWatcher is used to watch new proofs for their anchor transaction // to be confirmed safely with a minimum number of confirmations. @@ -648,7 +649,7 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { log.Debugf("Attempting to deliver proof for script key %x", key.SerializeCompressed()) - proofCourierAddr, err := proof.ParseCourierAddrString( + proofCourierAddr, err := proof.ParseCourierAddress( string(out.ProofCourierAddr), ) if err != nil { @@ -663,14 +664,16 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { AssetID: *receiverProof.AssetID, Amount: out.Amount, } - courier, err := proofCourierAddr.NewCourier( - ctx, p.cfg.ProofCourierCfg, recipient, + courier, err := p.cfg.ProofCourierDispatcher.NewCourier( + proofCourierAddr, recipient, ) if err != nil { return fmt.Errorf("unable to initiate proof courier "+ "service handle: %w", err) } + defer courier.Close() + // Update courier events subscribers before attempting to // deliver proof. p.subscriberMtx.Lock() @@ -698,9 +701,15 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { return nil } - // If we have a proof courier instance active, then we'll launch several - // goroutines to deliver the proof(s) to the receiver(s). - if p.cfg.ProofCourierCfg != nil { + // If we have a non-interactive proof, then we'll launch several + // goroutines to deliver the proof(s) to the receiver(s). 
Since a + // pre-signed parcel (a parcel that uses the RPC driven vPSBT flow) + // doesn't have proof courier URLs (they aren't part of the vPSBT), the + // proofs must always be delivered in an interactive manner from sender + // to receiver, and we don't even need to attempt to use a proof + // courier. + _, isPreSigned := pkg.Parcel.(*PreSignedParcel) + if !isPreSigned { ctx, cancel := p.WithCtxQuitNoTimeout() defer cancel() diff --git a/tapfreighter/parcel.go b/tapfreighter/parcel.go index fe6018730..53066864b 100644 --- a/tapfreighter/parcel.go +++ b/tapfreighter/parcel.go @@ -183,7 +183,7 @@ func (p *AddressParcel) Validate() error { tapAddr := p.destAddrs[idx] // Validate proof courier addresses. - _, err := proof.ParseCourierAddrUrl(tapAddr.ProofCourierAddr) + err := proof.ValidateCourierAddress(&tapAddr.ProofCourierAddr) if err != nil { return fmt.Errorf("invalid proof courier address: %w", err) diff --git a/tapgarden/caretaker.go b/tapgarden/caretaker.go index d60c22dbb..78b867174 100644 --- a/tapgarden/caretaker.go +++ b/tapgarden/caretaker.go @@ -17,7 +17,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/lightninglabs/neutrino/cache/lru" - "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/commitment" "github.com/lightninglabs/taproot-assets/fn" @@ -1180,6 +1179,11 @@ func (b *BatchCaretaker) storeMintingProof(ctx context.Context, ID: uniID, Key: leafKey, Leaf: mintingLeaf, + + // We set this to true to indicate that we would like the syncer + // to log and reattempt (in the event of a failure) to push sync + // this proof leaf. + LogProofSync: true, }, nil } @@ -1336,14 +1340,13 @@ func newSingleValue[T any](v T) singleCacheValue[T] { // is used more as a set. type emptyCacheVal = singleCacheValue[emptyVal] -// GenGroupVeifier generates a group key verification callback function given a +// GenGroupVerifier generates a group key verification callback function given a // DB handle. func GenGroupVerifier(ctx context.Context, mintingStore MintingStore) func(*btcec.PublicKey) error { // Cache known group keys that were previously fetched. - assetGroups := lru.NewCache[ - asset.SerializedKey, emptyCacheVal]( + assetGroups := lru.NewCache[asset.SerializedKey, emptyCacheVal]( assetGroupCacheSize, ) @@ -1360,12 +1363,10 @@ func GenGroupVerifier(ctx context.Context, // This query will err if no stored group has a matching // tweaked group key. - _, err = mintingStore.FetchGroupByGroupKey( - ctx, groupKey, - ) + _, err = mintingStore.FetchGroupByGroupKey(ctx, groupKey) if err != nil { - return fmt.Errorf("%x: %w", assetGroupKey, - ErrGroupKeyUnknown) + return fmt.Errorf("%x: group verifier: %v: %w", + assetGroupKey[:], err, ErrGroupKeyUnknown) } _, _ = assetGroups.Put(assetGroupKey, emptyCacheVal{}) @@ -1377,12 +1378,12 @@ func GenGroupVerifier(ctx context.Context, // GenGroupAnchorVerifier generates a caching group anchor verification // callback function given a DB handle. func GenGroupAnchorVerifier(ctx context.Context, - mintingStore MintingStore) func(*asset.Genesis, - *asset.GroupKey) error { + mintingStore MintingStore) func(*asset.Genesis, *asset.GroupKey) error { // Cache anchors for groups that were previously fetched. 
groupAnchors := lru.NewCache[ - asset.SerializedKey, singleCacheValue[*asset.Genesis]]( + asset.SerializedKey, singleCacheValue[*asset.Genesis], + ]( assetGroupCacheSize, ) @@ -1394,7 +1395,9 @@ func GenGroupAnchorVerifier(ctx context.Context, ctx, &groupKey.GroupPubKey, ) if err != nil { - return ErrGroupKeyUnknown + return fmt.Errorf("%x: group anchor verifier: "+ + "%w", assetGroupKey[:], + ErrGroupKeyUnknown) } groupAnchor = newSingleValue(storedGroup.Genesis) diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go index 024c974f0..a30f7eb8c 100644 --- a/tapgarden/custodian.go +++ b/tapgarden/custodian.go @@ -1,7 +1,6 @@ package tapgarden import ( - "bytes" "errors" "fmt" "strings" @@ -71,7 +70,7 @@ type CustodianConfig struct { // ProofArchive is the storage backend for proofs to which we store new // incoming proofs. - ProofArchive proof.NotifyArchiver + ProofArchive proof.Archiver // ProofNotifier is the storage backend for proofs from which we are // notified about new proofs. This can be the same as the ProofArchive @@ -80,9 +79,10 @@ type CustodianConfig struct { // being available in the relational database). ProofNotifier proof.NotifyArchiver - // ProofCourierCfg is a general config applicable to all proof courier - // service handles. - ProofCourierCfg *proof.CourierCfg + // ProofCourierDispatcher is the dispatcher that is used to create new + // proof courier handles for receiving proofs based on the protocol of + // a proof courier address. + ProofCourierDispatcher proof.CourierDispatch // ProofRetrievalDelay is the time duration the custodian waits having // identified an asset transfer on-chain and before retrieving the @@ -270,11 +270,24 @@ func (c *Custodian) watchInboundAssets() { // Maybe a proof was delivered while we were shutting down or // starting up, let's check now. - err = c.checkProofAvailable(event) + available, err := c.checkProofAvailable(event) if err != nil { reportErr(err) return } + + // If we did find a proof, we did import it now and can remove + // the event from our cache. + if available { + delete(c.events, event.Outpoint) + + continue + } + + // If we didn't find a proof, we'll launch a goroutine to use + // the ProofCourier to import the proof into our local DB. + c.Wg.Add(1) + go c.receiveProof(event.Addr.Tap, event.Outpoint) } // Read all on-chain transactions and make sure they are mapped to an @@ -382,13 +395,21 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { } c.events[op] = event + + // Now that we've seen this output confirm on + // chain, we'll launch a goroutine to use the + // ProofCourier to import the proof into our + // local DB. + c.Wg.Add(1) + go c.receiveProof(event.Addr.Tap, op) } continue } // This is a new output, let's find out if it's for an address - // of ours. + // of ours. This step also creates a new event for the address + // if it doesn't exist yet. addr, err := c.mapToTapAddr(walletTx, uint32(idx), op) if err != nil { return err @@ -400,110 +421,98 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { continue } - // TODO(ffranr): This proof courier disabled check should be - // removed. It was implemented because some integration test do - // not setup and use a proof courier. - if c.cfg.ProofCourierCfg == nil { + // We now need to wait for a confirmation, since proofs will + // be delivered once the anchor transaction is confirmed. If + // we skip it now, we'll receive another notification once the + // transaction is confirmed. 
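+		// Unconfirmed transactions are therefore skipped here.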
+ if walletTx.Confirmations == 0 { continue } - // Now that we've seen this output on chain, we'll launch a - // goroutine to use the ProofCourier to import the proof into - // our local DB. + // Now that we've seen this output confirm on chain, we'll + // launch a goroutine to use the ProofCourier to import the + // proof into our local DB. c.Wg.Add(1) - go func() { - defer c.Wg.Done() + go c.receiveProof(addr, op) + } - ctx, cancel := c.WithCtxQuitNoTimeout() - defer cancel() + return nil +} - assetID := addr.AssetID +// receiveProof attempts to receive a proof for the given address and outpoint +// via the proof courier service. +// +// NOTE: This must be called as a goroutine. +func (c *Custodian) receiveProof(addr *address.Tap, op wire.OutPoint) { + defer c.Wg.Done() - log.Debugf("Waiting to receive proof for script key %x", - addr.ScriptKey.SerializeCompressed()) + ctx, cancel := c.WithCtxQuitNoTimeout() + defer cancel() - // Initiate proof courier service handle from the proof - // courier address found in the Tap address. - recipient := proof.Recipient{ - ScriptKey: &addr.ScriptKey, - AssetID: assetID, - Amount: addr.Amount, - } - courier, err := proof.NewCourier( - ctx, addr.ProofCourierAddr, - c.cfg.ProofCourierCfg, recipient, - ) - if err != nil { - log.Errorf("unable to initiate proof courier "+ - "service handle: %v", err) - return - } + assetID := addr.AssetID - // Update courier handle events subscribers before - // attempting to retrieve proof. - c.statusEventsSubsMtx.Lock() - courier.SetSubscribers(c.statusEventsSubs) - c.statusEventsSubsMtx.Unlock() - - // Sleep to give the sender an opportunity to transfer - // the proof to the proof courier service. - // Without this delay our first attempt at retrieving - // the proof will very likely fail. We should expect - // retrieval success before this delay. - select { - case <-time.After(c.cfg.ProofRetrievalDelay): - case <-ctx.Done(): - return - } + scriptKeyBytes := addr.ScriptKey.SerializeCompressed() + log.Debugf("Waiting to receive proof for script key %x", scriptKeyBytes) - // Attempt to receive proof via proof courier service. - loc := proof.Locator{ - AssetID: &assetID, - GroupKey: addr.GroupKey, - ScriptKey: addr.ScriptKey, - OutPoint: &op, - } - addrProof, err := courier.ReceiveProof(ctx, loc) - if err != nil { - log.Errorf("unable to recv proof: %v", err) - return - } + // Initiate proof courier service handle from the proof courier address + // found in the Tap address. + recipient := proof.Recipient{ + ScriptKey: &addr.ScriptKey, + AssetID: assetID, + Amount: addr.Amount, + } + courier, err := c.cfg.ProofCourierDispatcher.NewCourier( + &addr.ProofCourierAddr, recipient, + ) + if err != nil { + log.Errorf("Unable to initiate proof courier service handle: "+ + "%v", err) + return + } - log.Debugf("Received proof for: script_key=%x, "+ - "asset_id=%x", - addr.ScriptKey.SerializeCompressed(), - assetID[:]) + // Update courier handle events subscribers before attempting to + // retrieve proof. + c.statusEventsSubsMtx.Lock() + courier.SetSubscribers(c.statusEventsSubs) + c.statusEventsSubsMtx.Unlock() + + // Sleep to give the sender an opportunity to transfer the proof to the + // proof courier service. Without this delay our first attempt at + // retrieving the proof will very likely fail. We should expect + // retrieval success before this delay. 
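+	// If the custodian shuts down while waiting, we abort early.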
+ select { + case <-time.After(c.cfg.ProofRetrievalDelay): + case <-ctx.Done(): + return + } - ctx, cancel = c.CtxBlocking() - defer cancel() + // Attempt to receive proof via proof courier service. + loc := proof.Locator{ + AssetID: &assetID, + GroupKey: addr.GroupKey, + ScriptKey: addr.ScriptKey, + OutPoint: &op, + } + addrProof, err := courier.ReceiveProof(ctx, loc) + if err != nil { + log.Errorf("Unable to receive proof using courier: %v", err) + return + } - headerVerifier := GenHeaderVerifier( - ctx, c.cfg.ChainBridge, - ) - err = c.cfg.ProofArchive.ImportProofs( - ctx, headerVerifier, c.cfg.GroupVerifier, false, - addrProof, - ) - if err != nil { - log.Errorf("unable to import proofs: %v", err) - return - } + log.Debugf("Received proof for: script_key=%x, asset_id=%x", + scriptKeyBytes, assetID[:]) - // At this point the "receive" process is complete. We - // will now notify all status event subscribers. - recvCompleteEvent := NewAssetRecvCompleteEvent( - *addr, op, - ) - err = c.publishSubscriberStatusEvent(recvCompleteEvent) - if err != nil { - log.Errorf("unable publish status event: %v", - err) - return - } - }() - } + ctx, cancel = c.CtxBlocking() + defer cancel() - return nil + headerVerifier := GenHeaderVerifier(ctx, c.cfg.ChainBridge) + err = c.cfg.ProofArchive.ImportProofs( + ctx, headerVerifier, c.cfg.GroupVerifier, false, addrProof, + ) + if err != nil { + log.Errorf("Unable to import proofs: %v", err) + return + } } // mapToTapAddr attempts to match a transaction output to a Taproot Asset @@ -595,8 +604,7 @@ func (c *Custodian) importAddrToWallet(addr *address.AddrWithKeyInfo) error { log.Infof("Imported Taproot Asset address %v into wallet", addrStr) if p2trAddr != nil { - log.Infof("watching p2tr address %v on chain", - p2trAddr.String()) + log.Infof("Watching p2tr address %v on chain", p2trAddr) } return c.cfg.AddrBook.SetAddrManaged(ctxt, addr, time.Now()) @@ -604,7 +612,7 @@ func (c *Custodian) importAddrToWallet(addr *address.AddrWithKeyInfo) error { // checkProofAvailable checks the proof storage if a proof for the given event // is already available. If it is, and it checks out, the event is updated. -func (c *Custodian) checkProofAvailable(event *address.Event) error { +func (c *Custodian) checkProofAvailable(event *address.Event) (bool, error) { ctxt, cancel := c.WithCtxQuit() defer cancel() @@ -617,46 +625,140 @@ func (c *Custodian) checkProofAvailable(event *address.Event) error { AssetID: fn.Ptr(event.Addr.AssetID), GroupKey: event.Addr.GroupKey, ScriptKey: event.Addr.ScriptKey, + OutPoint: &event.Outpoint, }) switch { case errors.Is(err, proof.ErrProofNotFound): - return nil + return false, nil case err != nil: - return fmt.Errorf("error fetching proof for event: %w", err) + return false, fmt.Errorf("error fetching proof for event: %w", + err) } - file := proof.NewEmptyFile(proof.V0) - if err := file.Decode(bytes.NewReader(blob)); err != nil { - return fmt.Errorf("error decoding proof file: %w", err) + // At this point, we expect the proof to be a full file, containing the + // whole provenance chain (as required by implementers of the + // proof.NotifyArchiver.FetchProof() method). So if we don't we can't + // continue. + if !blob.IsFile() { + return false, fmt.Errorf("expected proof to be a full file, " + + "but got something else") + } + + file, err := blob.AsFile() + if err != nil { + return false, fmt.Errorf("error extracting proof file: %w", err) } // Exit early on empty proof (shouldn't happen outside of test cases). 
if file.IsEmpty() { - return fmt.Errorf("archive contained empty proof file: %w", err) + return false, fmt.Errorf("archive contained empty proof file") } lastProof, err := file.LastProof() if err != nil { - return fmt.Errorf("error fetching last proof: %w", err) + return false, fmt.Errorf("error fetching last proof: %w", err) } // The proof might be an old state, let's make sure it matches our event // before marking the inbound asset transfer as complete. if AddrMatchesAsset(event.Addr, &lastProof.Asset) { - return c.setReceiveCompleted(event, lastProof, file) + return true, c.setReceiveCompleted(event, lastProof, file) } - return nil + return false, nil } // mapProofToEvent inspects a new proof and attempts to match it to an existing // and pending address event. If a proof successfully matches the desired state // of the address, that completes the inbound transfer of an asset. func (c *Custodian) mapProofToEvent(p proof.Blob) error { - file := proof.NewEmptyFile(proof.V0) - if err := file.Decode(bytes.NewReader(p)); err != nil { - return fmt.Errorf("error decoding proof file: %w", err) + // We arrive here if we are notified about a new proof. The notification + // interface allows that proof to be a single transition proof. So if + // we don't have a full file yet, we need to fetch it now. The + // proof.NotifyArchiver.FetchProof() method will return the full file as + // per its Godoc. + var ( + proofBlob = p + lastProof *proof.Proof + err error + ) + if !p.IsFile() { + log.Debugf("Received single proof, inspecting if matches event") + lastProof, err = p.AsSingleProof() + if err != nil { + return fmt.Errorf("error decoding proof: %w", err) + } + + // Before we go ahead and fetch the full file, let's make sure + // we are actually interested in this proof. We need to do this + // because we receive all transfer proofs inserted into the + // local universe here. So they could just be from a proof sync + // run and not actually be for an address we are interested in. + haveMatchingEvents := fn.AnyMapItem( + c.events, func(e *address.Event) bool { + return EventMatchesProof(e, lastProof) + }, + ) + if !haveMatchingEvents { + log.Debugf("Proof doesn't match any events, skipping.") + return nil + } + + ctxt, cancel := c.WithCtxQuit() + defer cancel() + + loc := proof.Locator{ + AssetID: fn.Ptr(lastProof.Asset.ID()), + ScriptKey: *lastProof.Asset.ScriptKey.PubKey, + OutPoint: fn.Ptr(lastProof.OutPoint()), + } + if lastProof.Asset.GroupKey != nil { + loc.GroupKey = &lastProof.Asset.GroupKey.GroupPubKey + } + + log.Debugf("Received single proof, fetching full file") + proofBlob, err = c.cfg.ProofNotifier.FetchProof(ctxt, loc) + if err != nil { + return fmt.Errorf("error fetching full proof file for "+ + "event: %w", err) + } + + // Do we already have this proof in our main archive? This + // should only be false if we got the notification from our + // local universe instead of the local proof archive (which the + // couriers use). This is mainly an optimization to make sure we + // don't unnecessarily overwrite the proofs in our main archive. + haveProof, err := c.cfg.ProofArchive.HasProof(ctxt, loc) + if err != nil { + return fmt.Errorf("error checking if proof is "+ + "available: %w", err) + } + + // We don't have the proof yet, or not in all backends, so we + // need to import it now. 
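+		// If we do have it in all backends, the import below is
+		// skipped.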
+ if !haveProof { + headerVerifier := GenHeaderVerifier( + ctxt, c.cfg.ChainBridge, + ) + err = c.cfg.ProofArchive.ImportProofs( + ctxt, headerVerifier, c.cfg.GroupVerifier, + false, &proof.AnnotatedProof{ + Locator: loc, + Blob: proofBlob, + }, + ) + if err != nil { + return fmt.Errorf("error importing proof "+ + "file into main archive: %w", err) + } + } + } + + // Now we can be sure we have a file. + file, err := proofBlob.AsFile() + if err != nil { + return fmt.Errorf("error extracting proof file: %w", err) } // Exit early on empty proof (shouldn't happen outside of test cases). @@ -667,19 +769,22 @@ func (c *Custodian) mapProofToEvent(p proof.Blob) error { // We got the proof from the multi archiver, which verifies it before // giving it to us. So we don't have to verify them again and can - // directly look at the last state. - lastProof, err := file.LastProof() - if err != nil { - return fmt.Errorf("error fetching last proof: %w", err) + // directly look at the last state. We can skip extracting the last + // proof if we started out with a single proof in the first place, which + // we already parsed above. + if lastProof == nil { + lastProof, err = file.LastProof() + if err != nil { + return fmt.Errorf("error fetching last proof: %w", err) + } } - log.Infof("Received new proof file, version=%d, num_proofs=%d", - file.Version, file.NumProofs()) + log.Infof("Received new proof file for asset ID %s, version=%d,"+ + "num_proofs=%d", lastProof.Asset.ID().String(), file.Version, + file.NumProofs()) // Check if any of our in-flight events match the last proof's state. for _, event := range c.events { - if AddrMatchesAsset(event.Addr, &lastProof.Asset) && - event.Outpoint == lastProof.OutPoint() { - + if EventMatchesProof(event, lastProof) { // Importing a proof already creates the asset in the // database. Therefore, all we need to do is update the // state of the address event to mark it as completed @@ -702,13 +807,23 @@ func (c *Custodian) mapProofToEvent(p proof.Blob) error { func (c *Custodian) setReceiveCompleted(event *address.Event, lastProof *proof.Proof, proofFile *proof.File) error { + // At this point the "receive" process is complete. We will now notify + // all status event subscribers. + receiveCompleteEvent := NewAssetRecvCompleteEvent( + *event.Addr.Tap, event.Outpoint, + ) + err := c.publishSubscriberStatusEvent(receiveCompleteEvent) + if err != nil { + log.Errorf("Unable publish status event: %v", err) + } + // The proof is created after a single confirmation. To make sure we // notice if the anchor transaction is re-organized out of the chain, we // give all the not-yet-sufficiently-buried proofs in the received proof // file to the re-org watcher and replace the updated proof in the local // proof archive if a re-org happens. The sender will do the same, so no // re-send of the proof is necessary. - err := c.cfg.ProofWatcher.MaybeWatch( + err = c.cfg.ProofWatcher.MaybeWatch( proofFile, c.cfg.ProofWatcher.DefaultUpdateCallback(), ) if err != nil { @@ -823,3 +938,9 @@ func AddrMatchesAsset(addr *address.AddrWithKeyInfo, a *asset.Asset) bool { return addr.AssetID == a.ID() && groupKeyEqual && addr.ScriptKey.IsEqual(a.ScriptKey.PubKey) } + +// EventMatchesProof returns true if the given event matches the given proof. 
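+// A match requires the event's address to match the proof's asset and the
+// event's outpoint to equal the proof's anchor outpoint.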
+func EventMatchesProof(event *address.Event, p *proof.Proof) bool { + return AddrMatchesAsset(event.Addr, &p.Asset) && + event.Outpoint == p.OutPoint() +} diff --git a/tapgarden/custodian_test.go b/tapgarden/custodian_test.go index 21f9276c9..9f2fcf860 100644 --- a/tapgarden/custodian_test.go +++ b/tapgarden/custodian_test.go @@ -1,9 +1,12 @@ package tapgarden_test import ( + "bytes" "context" "database/sql" + "fmt" "math/rand" + "net/url" "testing" "time" @@ -13,15 +16,16 @@ import ( "github.com/lightninglabs/lndclient" "github.com/lightninglabs/taproot-assets/address" "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/commitment" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/internal/test" "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb" - "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightninglabs/taproot-assets/tapgarden" "github.com/lightninglabs/taproot-assets/tapscript" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -34,11 +38,9 @@ var ( ) // newAddrBook creates a new instance of the TapAddressBook book. -func newAddrBook(t *testing.T, keyRing *tapgarden.MockKeyRing, +func newAddrBookForDB(db *tapdb.BaseDB, keyRing *tapgarden.MockKeyRing, syncer *tapgarden.MockAssetSyncer) (*address.Book, - *tapdb.TapAddressBook, sqlc.Querier) { - - db := tapdb.NewTestDB(t) + *tapdb.TapAddressBook) { txCreator := func(tx *sql.Tx) tapdb.AddrBook { return db.WithTx(tx) @@ -54,12 +56,12 @@ func newAddrBook(t *testing.T, keyRing *tapgarden.MockKeyRing, Chain: *chainParams, KeyRing: keyRing, }) - return book, tapdbBook, db + return book, tapdbBook } // newProofArchive creates a new instance of the MultiArchiver. -func newProofArchive(t *testing.T) (*proof.MultiArchiver, *tapdb.AssetStore) { - db := tapdb.NewTestDB(t) +func newProofArchiveForDB(t *testing.T, db *tapdb.BaseDB) (*proof.MultiArchiver, + *tapdb.AssetStore) { txCreator := func(tx *sql.Tx) tapdb.ActiveAssetsStore { return db.WithTx(tx) @@ -90,7 +92,7 @@ type custodianHarness struct { addrBook *address.Book syncer *tapgarden.MockAssetSyncer assetDB *tapdb.AssetStore - proofArchive *proof.MultiArchiver + courier *proof.MockProofCourier } // assertStartup makes sure the custodian was started correctly. @@ -114,6 +116,48 @@ func (h *custodianHarness) eventually(fn func() bool) { require.Eventually(h.t, fn, testTimeout, testPollInterval) } +// assertEventsPresent makes sure that the given number of events is present in +// the address book, then returns those events. +func (h *custodianHarness) assertEventsPresent(numEvents int, + status address.Status) []*address.Event { + + ctx := context.Background() + ctxt, cancel := context.WithTimeout(ctx, testTimeout) + defer cancel() + + // Only one event should be registered though, as we've only created one + // transaction. 
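+	// More generally, we wait here until exactly numEvents events with
+	// the given status are present.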
+ var finalEvents []*address.Event + err := wait.NoError(func() error { + events, err := h.tapdbBook.QueryAddrEvents( + ctxt, address.EventQueryParams{}, + ) + if err != nil { + return err + } + + if len(events) != numEvents { + return fmt.Errorf("wanted %d events but got %d", + numEvents, len(events)) + } + + for idx, event := range events { + if event.Status != status { + return fmt.Errorf("event %d has status %v "+ + "but wanted %v", idx, event.Status, + status) + } + } + + finalEvents = events + + return nil + }, testTimeout) + require.NoError(h.t, err) + + return finalEvents +} + // assertAddrsRegistered makes sure that for each of the given addresses a // pubkey was imported into the wallet. func (h *custodianHarness) assertAddrsRegistered( @@ -138,8 +182,14 @@ func newHarness(t *testing.T, walletAnchor := tapgarden.NewMockWalletAnchor() keyRing := tapgarden.NewMockKeyRing() syncer := tapgarden.NewMockAssetSyncer() - addrBook, tapdbBook, _ := newAddrBook(t, keyRing, syncer) - proofArchive, assetDB := newProofArchive(t) + db := tapdb.NewTestDB(t) + addrBook, tapdbBook := newAddrBookForDB(db.BaseDB, keyRing, syncer) + _, assetDB := newProofArchiveForDB(t, db.BaseDB) + courier := proof.NewMockProofCourier() + courierDispatch := &proof.MockProofCourierDispatcher{ + Courier: courier, + } + proofWatcher := &tapgarden.MockProofWatcher{} ctxb := context.Background() for _, initialAddr := range initialAddrs { @@ -148,13 +198,15 @@ func newHarness(t *testing.T, } cfg := &tapgarden.CustodianConfig{ - ChainParams: chainParams, - ChainBridge: chainBridge, - WalletAnchor: walletAnchor, - AddrBook: addrBook, - ProofArchive: proofArchive, - ProofNotifier: assetDB, - ErrChan: make(chan error, 1), + ChainParams: chainParams, + ChainBridge: chainBridge, + WalletAnchor: walletAnchor, + AddrBook: addrBook, + ProofArchive: assetDB, + ProofNotifier: assetDB, + ProofCourierDispatcher: courierDispatch, + ProofWatcher: proofWatcher, + ErrChan: make(chan error, 1), } return &custodianHarness{ t: t, @@ -167,20 +219,21 @@ func newHarness(t *testing.T, addrBook: addrBook, syncer: syncer, assetDB: assetDB, - proofArchive: proofArchive, + courier: courier, } } -func randAddr(h *custodianHarness) *address.AddrWithKeyInfo { - proofCourierAddr := address.RandProofCourierAddr(h.t) +func randAddr(h *custodianHarness) (*address.AddrWithKeyInfo, *asset.Genesis) { addr, genesis, group := address.RandAddr( - h.t, &address.RegressionNetTap, proofCourierAddr, + h.t, &address.RegressionNetTap, url.URL{ + Scheme: "mock", + }, ) err := h.tapdbBook.InsertAssetGen(context.Background(), genesis, group) require.NoError(h.t, err) - return addr + return addr, genesis } func randWalletTx(addr *address.AddrWithKeyInfo) (int, *lndclient.Transaction) { @@ -193,9 +246,12 @@ func randWalletTx(addr *address.AddrWithKeyInfo) (int, *lndclient.Transaction) { taprootOutput := rand.Intn(numOutputs) for idx := 0; idx < numInputs; idx++ { - in := &wire.TxIn{} - _, _ = rand.Read(in.PreviousOutPoint.Hash[:]) - in.PreviousOutPoint.Index = rand.Uint32() + in := &wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Hash: test.RandHash(), + Index: rand.Uint32(), + }, + } tx.Tx.AddTxIn(in) tx.PreviousOutpoints = append( tx.PreviousOutpoints, &lnrpc.PreviousOutPoint{ @@ -231,6 +287,71 @@ func randWalletTx(addr *address.AddrWithKeyInfo) (int, *lndclient.Transaction) { return taprootOutput, tx } +func randProof(t *testing.T, outputIndex int, tx *wire.MsgTx, + genesis *asset.Genesis, + addr *address.AddrWithKeyInfo) *proof.AnnotatedProof { + + a := asset.Asset{ + 
Version: asset.V0, + Genesis: *genesis, + Amount: addr.Amount, + ScriptKey: asset.NewScriptKey(&addr.ScriptKey), + } + if addr.GroupKey != nil { + a.GroupKey = &asset.GroupKey{ + GroupPubKey: *addr.GroupKey, + } + } + + p := &proof.Proof{ + PrevOut: wire.OutPoint{}, + BlockHeader: wire.BlockHeader{ + Timestamp: time.Unix(rand.Int63(), 0), + }, + AnchorTx: *tx, + TxMerkleProof: proof.TxMerkleProof{}, + Asset: a, + InclusionProof: proof.TaprootProof{ + InternalKey: test.RandPubKey(t), + OutputIndex: uint32(outputIndex), + }, + } + + f, err := proof.NewFile(proof.V0, *p) + require.NoError(t, err) + + var buf bytes.Buffer + require.NoError(t, f.Encode(&buf)) + + ac, err := commitment.NewAssetCommitment(&a) + require.NoError(t, err) + tc, err := commitment.NewTapCommitment(ac) + require.NoError(t, err) + + op := wire.OutPoint{ + Hash: tx.TxHash(), + Index: uint32(outputIndex), + } + + return &proof.AnnotatedProof{ + Locator: proof.Locator{ + AssetID: fn.Ptr(genesis.ID()), + GroupKey: addr.GroupKey, + ScriptKey: addr.ScriptKey, + OutPoint: &op, + }, + Blob: buf.Bytes(), + AssetSnapshot: &proof.AssetSnapshot{ + Asset: &a, + OutPoint: op, + AnchorTx: tx, + OutputIndex: uint32(outputIndex), + InternalKey: test.RandPubKey(t), + ScriptRoot: tc, + }, + } +} + // insertAssetInfo starts a background goroutine that receives asset info that // was fetched from the asset syncer, and stores it in the address book. This // simulates asset bootstrapping that would occur during universe sync. @@ -279,7 +400,7 @@ func TestCustodianNewAddr(t *testing.T) { <-h.keyRing.ReqKeys }() ctx := context.Background() - addr := randAddr(h) + addr, _ := randAddr(h) proofCourierAddr := address.RandProofCourierAddr(t) dbAddr, err := h.addrBook.NewAddress( ctx, addr.AssetID, addr.Amount, nil, proofCourierAddr, @@ -373,6 +494,8 @@ func TestBookAssetSyncer(t *testing.T) { close(quitAssetWatcher) } +// TestTransactionHandling tests that the custodian correctly handles incoming +// transactions. func TestTransactionHandling(t *testing.T) { h := newHarness(t, nil) @@ -382,15 +505,21 @@ func TestTransactionHandling(t *testing.T) { const numAddrs = 5 addrs := make([]*address.AddrWithKeyInfo, numAddrs) + genesis := make([]*asset.Genesis, numAddrs) for i := 0; i < numAddrs; i++ { - addrs[i] = randAddr(h) + addrs[i], genesis[i] = randAddr(h) err := h.tapdbBook.InsertAddrs(ctx, *addrs[i]) require.NoError(t, err) } outputIdx, tx := randWalletTx(addrs[0]) + tx.Confirmations = 1 h.walletAnchor.Transactions = append(h.walletAnchor.Transactions, *tx) + mockProof := randProof(t, outputIdx, tx.Tx, genesis[0], addrs[0]) + err := h.courier.DeliverProof(nil, mockProof) + require.NoError(t, err) + require.NoError(t, h.c.Start()) t.Cleanup(func() { require.NoError(t, h.c.Stop()) @@ -402,21 +531,92 @@ func TestTransactionHandling(t *testing.T) { // Only one event should be registered though, as we've only created one // transaction. - h.eventually(func() bool { - events, err := h.tapdbBook.QueryAddrEvents( - ctx, address.EventQueryParams{}, - ) - require.NoError(t, err) + events := h.assertEventsPresent(1, address.StatusCompleted) + require.EqualValues(t, outputIdx, events[0].Outpoint.Index) - if len(events) != 1 { - t.Logf("Got %d events", len(events)) - return false - } + dbProof, err := h.assetDB.FetchProof(ctx, mockProof.Locator) + require.NoError(t, err) + require.EqualValues(t, mockProof.Blob, dbProof) +} + +// TestTransactionConfirmedOnly tests that the custodian only starts the proof +// courier once a transaction has been confirmed. 
We also test that it correctly +// re-tries fetching proofs using a proof courier after it has been restarted. +func TestTransactionConfirmedOnly(t *testing.T) { + t.Parallel() - require.EqualValues(t, outputIdx, events[0].Outpoint.Index) + runTransactionConfirmedOnlyTest(t, false) + runTransactionConfirmedOnlyTest(t, true) +} + +// runTransactionConfirmedOnlyTest runs the transaction confirmed only test, +// optionally restarting the custodian in the middle. +func runTransactionConfirmedOnlyTest(t *testing.T, withRestart bool) { + h := newHarness(t, nil) - return true + // Before we start the custodian, we create a few random addresses. + ctx := context.Background() + + const numAddrs = 5 + addrs := make([]*address.AddrWithKeyInfo, numAddrs) + genesis := make([]*asset.Genesis, numAddrs) + for i := 0; i < numAddrs; i++ { + addrs[i], genesis[i] = randAddr(h) + err := h.tapdbBook.InsertAddrs(ctx, *addrs[i]) + require.NoError(t, err) + } + + // We start the custodian and make sure it's started up correctly. This + // should add pending events for each of the addresses. + require.NoError(t, h.c.Start()) + t.Cleanup(func() { + require.NoError(t, h.c.Stop()) }) + h.assertStartup() + + // We expect all addresses to be watched by the wallet now. + h.assertAddrsRegistered(addrs...) + + // To make sure the custodian adds address events for each address, we + // need to signal an unconfirmed transaction for each of them now. + outputIndexes := make([]int, numAddrs) + transactions := make([]*lndclient.Transaction, numAddrs) + for idx := range addrs { + outputIndex, tx := randWalletTx(addrs[idx]) + outputIndexes[idx] = outputIndex + transactions[idx] = tx + h.walletAnchor.SubscribeTx <- *tx + + // We also simulate that the proof courier has all the proofs + // it needs. + mockProof := randProof( + t, outputIndexes[idx], tx.Tx, genesis[idx], addrs[idx], + ) + _ = h.courier.DeliverProof(nil, mockProof) + } + + // We want events to be created for each address, they should be in the + // state where they detected a transaction. + h.assertEventsPresent(numAddrs, address.StatusTransactionDetected) + + // In case we're testing with a restart, we now restart the custodian. + if withRestart { + require.NoError(t, h.c.Stop()) + + h.c = tapgarden.NewCustodian(h.cfg) + require.NoError(t, h.c.Start()) + h.assertStartup() + } + + // Now we confirm the transactions. This should trigger the custodian to + // fetch the proof for each of the addresses. + for idx := range transactions { + tx := transactions[idx] + tx.Confirmations = 1 + h.walletAnchor.SubscribeTx <- *tx + } + + h.assertEventsPresent(numAddrs, address.StatusCompleted) } func mustMakeAddr(t *testing.T, diff --git a/tapgarden/mock.go b/tapgarden/mock.go index 160ef3b0e..887272e53 100644 --- a/tapgarden/mock.go +++ b/tapgarden/mock.go @@ -553,6 +553,12 @@ func (m *MockProofArchive) FetchProof(ctx context.Context, return nil, nil } +func (m *MockProofArchive) HasProof(ctx context.Context, + id proof.Locator) (bool, error) { + + return false, nil +} + func (m *MockProofArchive) FetchProofs(ctx context.Context, id asset.ID) ([]*proof.AnnotatedProof, error) { diff --git a/tapgarden/re-org_watcher.go b/tapgarden/re-org_watcher.go index 4b650d60c..3bb20201e 100644 --- a/tapgarden/re-org_watcher.go +++ b/tapgarden/re-org_watcher.go @@ -68,7 +68,7 @@ type ReOrgWatcherConfig struct { // ProofArchive is the storage backend for proofs to which we store // updated proofs. 
- ProofArchive proof.NotifyArchiver + ProofArchive proof.Archiver // NonBuriedAssetFetcher is a function that returns all assets that are // not yet sufficiently deep buried. diff --git a/universe/auto_syncer.go b/universe/auto_syncer.go index 4bed26bae..23934d826 100644 --- a/universe/auto_syncer.go +++ b/universe/auto_syncer.go @@ -71,8 +71,18 @@ type FederationPushReq struct { // Leaf is the new leaf to add. Leaf *Leaf + // resp is a channel that will be sent the asset issuance/transfer + // proof and corresponding universe/multiverse inclusion proofs if the + // federation proof push was successful. resp chan *Proof - err chan error + + // LogProofSync is a boolean that indicates, if true, that the proof + // leaf sync attempt should be logged and actively managed to ensure + // that the federation push procedure is repeated in the event of a + // failure. + LogProofSync bool + + err chan error } // FederationProofBatchPushReq is used to push out a batch of universe proof @@ -97,8 +107,12 @@ type FederationEnvoy struct { stopOnce sync.Once + // pushRequests is a channel that will be sent new requests to push out + // proof leaves to the federation. pushRequests chan *FederationPushReq + // batchPushRequests is a channel that will be sent new requests to push + // out batch proof leaves to the federation. batchPushRequests chan *FederationProofBatchPushReq } @@ -161,6 +175,11 @@ func (f *FederationEnvoy) Start() error { return nil } +// Close frees up any ephemeral resources allocated by the envoy. +func (f *FederationEnvoy) Close() error { + return nil +} + // Stop stops all active goroutines. func (f *FederationEnvoy) Stop() error { f.stopOnce.Do(func() { @@ -219,48 +238,105 @@ func (f *FederationEnvoy) syncServerState(ctx context.Context, return nil } -// pushProofToFederation attempts to push out a new proof to the current -// federation in parallel. -func (f *FederationEnvoy) pushProofToFederation(uniID Identifier, key LeafKey, - leaf *Leaf) { +// pushProofToServer attempts to push out a new proof to the target server. +func (f *FederationEnvoy) pushProofToServer(ctx context.Context, + uniID Identifier, key LeafKey, leaf *Leaf, addr ServerAddr) error { - // Fetch all universe servers in our federation. - fedServers, err := f.tryFetchServers() - if err != nil || len(fedServers) == 0 { - return + remoteUniverseServer, err := f.cfg.NewRemoteRegistrar(addr) + if err != nil { + return fmt.Errorf("cannot push proof unable to connect "+ + "to remote server(%v): %w", addr.HostStr(), err) } - log.Infof("Pushing new proof to %v federation members, proof_key=%v", - len(fedServers), spew.Sdump(key)) + _, err = remoteUniverseServer.UpsertProofLeaf( + ctx, uniID, key, leaf, + ) + if err != nil { + return fmt.Errorf("cannot push proof to remote "+ + "server(%v): %w", addr.HostStr(), err) + } - ctx, cancel := f.WithCtxQuitNoTimeout() - defer cancel() + return nil +} + +// pushProofToServerLogged attempts to push out a new proof to the target +// server, and logs the sync attempt. +func (f *FederationEnvoy) pushProofToServerLogged(ctx context.Context, + uniID Identifier, key LeafKey, leaf *Leaf, addr ServerAddr) error { + + // Ensure that we have a pending sync log entry for this + // leaf and server pair. This will allow us to handle all + // pending syncs in the event of a restart or at a different + // point in the envoy. 
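+	// The attempt counter for the entry is bumped at the same time.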
+ _, err := f.cfg.FederationDB.UpsertFederationProofSyncLog( + ctx, uniID, key, addr, SyncDirectionPush, + ProofSyncStatusPending, true, + ) + if err != nil { + return fmt.Errorf("unable to log proof sync as pending: %w", + err) + } + + // Push the proof to the remote server. + err = f.pushProofToServer(ctx, uniID, key, leaf, addr) + if err != nil { + return fmt.Errorf("cannot push proof to remote server(%v): %w", + addr.HostStr(), err) + } + + // We did not encounter an error in our proof push + // attempt. Log the proof sync attempt as complete. + _, err = f.cfg.FederationDB.UpsertFederationProofSyncLog( + ctx, uniID, key, addr, SyncDirectionPush, + ProofSyncStatusComplete, false, + ) + if err != nil { + return fmt.Errorf("unable to log proof sync attempt: %w", err) + } + + return nil +} + +// pushProofToFederation attempts to push out a new proof to the current +// federation in parallel. +func (f *FederationEnvoy) pushProofToFederation(ctx context.Context, + uniID Identifier, key LeafKey, leaf *Leaf, fedServers []ServerAddr, + logProofSync bool) { + + log.Infof("Pushing proof to %v federation members, proof_key=%v", + len(fedServers), spew.Sdump(key)) // To push a new proof out, we'll attempt to dial to the remote // registrar, then will attempt to push the new proof directly to the // register. pushNewProof := func(ctx context.Context, addr ServerAddr) error { - remoteUniverseServer, err := f.cfg.NewRemoteRegistrar(addr) - if err != nil { - log.Warnf("cannot push proof unable to connect "+ - "to remote server(%v): %v", addr.HostStr(), - err) + // If we are logging proof sync attempts, we will use the + // logged version of the push function. + if logProofSync { + err := f.pushProofToServerLogged( + ctx, uniID, key, leaf, addr, + ) + if err != nil { + log.Warnf("Cannot push proof via logged "+ + "server push: %v", err) + } + return nil } - _, err = remoteUniverseServer.UpsertProofLeaf( - ctx, uniID, key, leaf, - ) + // If we are not logging proof sync attempts, we will use the + // non-logged version of the push function. + err := f.pushProofToServer(ctx, uniID, key, leaf, addr) if err != nil { - log.Warnf("cannot push proof to remote "+ - "server(%v): %v", addr.HostStr(), err) + log.Warnf("Cannot push proof: %v", err) } + return nil } // To conclude, we'll attempt to push the new proof to all the universe // servers in parallel. - err = fn.ParSlice(ctx, fedServers, pushNewProof) + err := fn.ParSlice(ctx, fedServers, pushNewProof) if err != nil { // TODO(roasbeef): retry in the background until successful? log.Errorf("unable to push proof to federation: %v", err) @@ -268,6 +344,57 @@ func (f *FederationEnvoy) pushProofToFederation(uniID Identifier, key LeafKey, } } +// filterProofSyncPending filters out servers that have already been synced +// with for the given leaf. +func (f *FederationEnvoy) filterProofSyncPending(fedServers []ServerAddr, + uniID Identifier, key LeafKey) ([]ServerAddr, error) { + + // If there are no servers to filter, then we'll return early. This + // saves from querying the database unnecessarily. + if len(fedServers) == 0 { + return nil, nil + } + + ctx, cancel := f.WithCtxQuit() + defer cancel() + + // Select all sync push complete log entries for the given universe + // leaf. If there are any servers which are sync complete within this + // log set, we will filter them out of our target server set. 
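The query and filter steps continue below. As a stand-alone illustration of the filtering idea (simplified types, not the actual ServerAddr or log entry types): servers that already have a complete push log entry for the leaf are dropped from the target set, so only servers still missing the proof are pushed to.

package main

import "fmt"

func filterPending(allServers []string, completed map[string]struct{}) []string {
	var pending []string
	for _, addr := range allServers {
		// Skip servers whose push is already logged as complete.
		if _, ok := completed[addr]; ok {
			continue
		}
		pending = append(pending, addr)
	}
	return pending
}

func main() {
	servers := []string{"uni1:10029", "uni2:10029", "uni3:10029"}
	done := map[string]struct{}{"uni2:10029": {}}

	fmt.Println(filterPending(servers, done)) // [uni1:10029 uni3:10029]
}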
+	logs, err := f.cfg.FederationDB.QueryFederationProofSyncLog(
+		ctx, uniID, key, SyncDirectionPush,
+		ProofSyncStatusComplete,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("unable to query federation sync log: %w",
+			err)
+	}
+
+	// Construct a map of servers that have already been synced with for the
+	// given leaf.
+	syncedServers := make(map[string]struct{})
+	for idx := range logs {
+		logEntry := logs[idx]
+		syncedServers[logEntry.ServerAddr.HostStr()] = struct{}{}
+	}
+
+	// Filter out servers that we've already pushed to.
+	filteredFedServers := fn.Filter(fedServers, func(a ServerAddr) bool {
+		// Filter out servers that have a log entry with sync status
+		// complete.
+		if _, ok := syncedServers[a.HostStr()]; ok {
+			return false
+		}
+
+		// By this point we haven't found any logs corresponding to the
+		// given server, so we return true and include the server as a
+		// sync target for the given leaf.
+		return true
+	})
+
+	return filteredFedServers, nil
+}
+
 // syncer is the main goroutine that's responsible for interacting with the
 // federation envoy. It also accepts incoming requests to push out new updates
 // to the federation.
@@ -281,13 +408,25 @@ func (f *FederationEnvoy) syncer() {
 	syncTicker := time.NewTicker(f.cfg.SyncInterval)
 	defer syncTicker.Stop()
 
+	// We'll use a timeout that's slightly less than the sync interval to
+	// help avoid ticking into a new sync event before the previous event
+	// has finished.
+	syncContextTimeout := f.cfg.SyncInterval - 1*time.Second
+	if syncContextTimeout < 0 {
+		// If the sync interval is less than a second, then we'll use
+		// the sync interval as the timeout.
+		syncContextTimeout = f.cfg.SyncInterval
+	}
+
 	for {
 		select {
 		// A new sync event has just been triggered, so we'll attempt
 		// to synchronize state with all the active universe servers in
 		// the federation.
 		case <-syncTicker.C:
-			// Error propogation is handled in tryFetchServers, we
+			log.Debug("Federation envoy handling new tick event")
+
+			// Error propagation is handled in tryFetchServers, we
 			// only need to exit here.
 			fedServers, err := f.tryFetchServers()
 			if err != nil {
@@ -305,11 +444,60 @@ func (f *FederationEnvoy) syncer() {
 				continue
 			}
 
+			// After we've synced with the federation, we'll
+			// attempt to push out any pending proofs that we
+			// haven't yet completed.
+			ctxFetchLog, cancelFetchLog := f.WithCtxQuitNoTimeout()
+			syncDirection := SyncDirectionPush
+			db := f.cfg.FederationDB
+			logEntries, err := db.FetchPendingProofsSyncLog(
+				ctxFetchLog, &syncDirection,
+			)
+			cancelFetchLog()
+			if err != nil {
+				log.Warnf("unable to query pending push "+
+					"sync log: %v", err)
+				continue
+			}
+
+			if len(logEntries) > 0 {
+				log.Debugf("Handling pending proof sync log "+
+					"entries (entries_count=%d)",
+					len(logEntries))
+			}
+
+			// TODO(ffranr): Take account of any new servers that
+			// have been added since the last time we populated the
+			// log for a given proof leaf. Pending proof sync log
+			// entries are only relevant for the set of servers
+			// that existed at the time the log entry was created.
+			// If a new server is added, then we should create a
+			// new log entry for the new server.
+
+			for idx := range logEntries {
+				entry := logEntries[idx]
+
+				servers := []ServerAddr{
+					entry.ServerAddr,
+				}
+
+				ctxPush, cancelPush :=
+					f.CtxBlockingCustomTimeout(
+						syncContextTimeout,
+					)
+				f.pushProofToFederation(
+					ctxPush, entry.UniID, entry.LeafKey,
+					&entry.Leaf, servers, true,
+				)
+				cancelPush()
+			}
+
 		// A new push request has just arrived.
We'll perform a // asynchronous registration with the local Universe registrar, // then push it out in an async manner to the federation // members. case pushReq := <-f.pushRequests: + log.Debug("Federation envoy handling push request") ctx, cancel := f.WithCtxQuit() // First, we'll attempt to registrar the proof leaf with @@ -333,13 +521,53 @@ func (f *FederationEnvoy) syncer() { // proof out to the federation in the background. pushReq.resp <- newProof - // With the response sent above, we'll push this out to - // all the Universe servers in the background. - go f.pushProofToFederation( - pushReq.ID, pushReq.Key, pushReq.Leaf, + // Fetch all universe servers in our federation. + fedServers, err := f.tryFetchServers() + if err != nil { + err := fmt.Errorf("unable to fetch "+ + "federation servers: %w", err) + log.Warnf(err.Error()) + pushReq.err <- err + continue + } + + if len(fedServers) == 0 { + log.Warnf("could not find any federation " + + "servers") + continue + } + + if pushReq.LogProofSync { + // We are attempting to sync using the + // logged proof sync procedure. We will + // therefore narrow down the set of target + // servers based on the sync log. Only servers + // that are not yet push sync complete will be + // targeted. + fedServers, err = f.filterProofSyncPending( + fedServers, pushReq.ID, pushReq.Key, + ) + if err != nil { + log.Warnf("failed to filter " + + "federation servers") + continue + } + } + + // With the response sent above, we'll push this + // out to all the Universe servers in the + // background. + ctxPush, cancelPush := f.WithCtxQuitNoTimeout() + f.pushProofToFederation( + ctxPush, pushReq.ID, pushReq.Key, + pushReq.Leaf, fedServers, + pushReq.LogProofSync, ) + cancelPush() case pushReq := <-f.batchPushRequests: + log.Debug("Federation envoy handling batch push " + + "request") ctx, cancel := f.WithCtxQuitNoTimeout() // First, we'll attempt to registrar the proof leaf with @@ -362,16 +590,34 @@ func (f *FederationEnvoy) syncer() { // we'll return back to the caller. pushReq.resp <- struct{}{} + // Fetch all universe servers in our federation. + fedServers, err := f.tryFetchServers() + if err != nil { + err := fmt.Errorf("unable to fetch "+ + "federation servers: %w", err) + log.Warnf(err.Error()) + pushReq.err <- err + continue + } + + if len(fedServers) == 0 { + log.Warnf("could not find any federation " + + "servers") + continue + } + // With the response sent above, we'll push this out to // all the Universe servers in the background. - go func() { - for idx := range pushReq.Batch { - item := pushReq.Batch[idx] - f.pushProofToFederation( - item.ID, item.Key, item.Leaf, - ) - } - }() + ctxPush, cancelPush := f.WithCtxQuitNoTimeout() + for idx := range pushReq.Batch { + item := pushReq.Batch[idx] + + f.pushProofToFederation( + ctxPush, item.ID, item.Key, item.Leaf, + fedServers, item.LogProofSync, + ) + } + cancelPush() case <-f.Quit: return @@ -387,12 +633,18 @@ func (f *FederationEnvoy) syncer() { func (f *FederationEnvoy) UpsertProofLeaf(_ context.Context, id Identifier, key LeafKey, leaf *Leaf) (*Proof, error) { + // If we're attempting to push an issuance proof, then we'll ensure + // that we track the sync attempt to ensure that we retry in the event + // of a failure. 
+ logProofSync := id.ProofType == ProofTypeIssuance + pushReq := &FederationPushReq{ - ID: id, - Key: key, - Leaf: leaf, - resp: make(chan *Proof, 1), - err: make(chan error, 1), + ID: id, + Key: key, + Leaf: leaf, + LogProofSync: logProofSync, + resp: make(chan *Proof, 1), + err: make(chan error, 1), } if !fn.SendOrQuit(f.pushRequests, pushReq, f.Quit) { diff --git a/universe/base.go b/universe/base.go index 11f09e7f1..03aadf95f 100644 --- a/universe/base.go +++ b/universe/base.go @@ -71,6 +71,11 @@ func NewArchive(cfg ArchiveConfig) *Archive { return a } +// Close closes the archive, stopping all goroutines and freeing all resources. +func (a *Archive) Close() error { + return nil +} + // fetchUniverse returns the base universe instance for the passed identifier. // The universe will be loaded in on demand if it has not been seen before. func (a *Archive) fetchUniverse(id Identifier) BaseBackend { @@ -220,7 +225,7 @@ func (a *Archive) UpsertProofLeaf(ctx context.Context, id Identifier, ctx, id, key, &newProof, prevAssetSnapshot, ) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to verify proof: %w", err) } // Now that we know the proof is valid, we'll insert it into the base diff --git a/universe/interface.go b/universe/interface.go index 5fdf19adb..78b8ae60d 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -84,6 +84,86 @@ func (i *Identifier) StringForLog() string { i.String(), i.AssetID[:], groupKey, i.ProofType) } +// NewUniIDFromAsset creates a new universe ID from an asset. +func NewUniIDFromAsset(a asset.Asset) Identifier { + proofType := ProofTypeTransfer + if a.IsGenesisAsset() { + proofType = ProofTypeIssuance + } + + if a.GroupKey != nil { + return Identifier{ + GroupKey: &a.GroupKey.GroupPubKey, + ProofType: proofType, + } + } + + return Identifier{ + AssetID: a.ID(), + ProofType: proofType, + } +} + +// NewUniIDFromRawArgs creates a new universe ID from the raw arguments. The +// asset ID bytes and group key bytes are mutually exclusive. If the group key +// bytes are set, then the asset ID bytes will be ignored. +// This function is useful in deriving a universe ID from the data stored in the +// database. +func NewUniIDFromRawArgs(assetIDBytes []byte, groupKeyBytes []byte, + proofTypeStr string) (Identifier, error) { + + proofType, err := ParseStrProofType(proofTypeStr) + if err != nil { + return Identifier{}, err + } + + // If the group key bytes are set, then we'll preferentially populate + // the universe ID with that and not the asset ID. + if len(groupKeyBytes) != 0 { + groupKey, err := parseGroupKey(groupKeyBytes) + if err != nil { + return Identifier{}, fmt.Errorf("unable to parse "+ + "group key: %w", err) + } + return Identifier{ + GroupKey: groupKey, + ProofType: proofType, + }, nil + } + + // At this point we know that the group key bytes are nil, so we'll + // attempt to parse the asset ID bytes. + if len(assetIDBytes) == 0 { + return Identifier{}, fmt.Errorf("asset ID bytes and group " + + "key bytes are both nil") + } + + var assetID asset.ID + copy(assetID[:], assetIDBytes) + + return Identifier{ + AssetID: assetID, + ProofType: proofType, + }, nil +} + +// parseGroupKey parses a group key from bytes, which can be in either the +// Schnorr or Compressed format. +func parseGroupKey(scriptKey []byte) (*btcec.PublicKey, error) { + switch len(scriptKey) { + case schnorr.PubKeyBytesLen: + return schnorr.ParsePubKey(scriptKey) + + // Truncate the key and then parse as a Schnorr key. 
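The parseGroupKey switch continues below. As context: a group key may be stored either as a 32-byte BIP-340 x-only key or as a 33-byte compressed key, and dropping the compressed key's leading parity byte leaves the same 32 x-coordinate bytes the Schnorr encoding uses. A small runnable sketch, assuming the btcec/v2 and schnorr packages referenced in the hunk above:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/btcec/v2/schnorr"
)

func main() {
	priv, err := btcec.NewPrivateKey()
	if err != nil {
		panic(err)
	}

	compressed := priv.PubKey().SerializeCompressed() // 33 bytes, parity prefix
	xOnly := schnorr.SerializePubKey(priv.PubKey())   // 32 bytes, x coordinate only

	fmt.Println(len(compressed), len(xOnly)) // 33 32

	// Dropping the parity byte from the compressed encoding leaves the same
	// 32 x-coordinate bytes that the Schnorr (x-only) encoding uses.
	keyFromCompressed, err := schnorr.ParsePubKey(compressed[1:])
	if err != nil {
		panic(err)
	}
	keyFromXOnly, err := schnorr.ParsePubKey(xOnly)
	if err != nil {
		panic(err)
	}

	// Both parse to a key with the same x coordinate (BIP-340 treats the
	// two possible Y parities as the same x-only key).
	fmt.Println(keyFromCompressed.IsEqual(keyFromXOnly)) // true
}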
+ case btcec.PubKeyBytesLenCompressed: + return schnorr.ParsePubKey(scriptKey[1:]) + + default: + return nil, fmt.Errorf("unknown script key length: %v", + len(scriptKey)) + } +} + // ValidateProofUniverseType validates that the proof type matches the universe // identifier proof type. func ValidateProofUniverseType(a *asset.Asset, uniID Identifier) error { @@ -321,6 +401,9 @@ type Registrar interface { // UpsertProofLeaf upserts a proof leaf within the target universe tree. UpsertProofLeaf(ctx context.Context, id Identifier, key LeafKey, leaf *Leaf) (*Proof, error) + + // Close is used to shutdown the active registrar instance. + Close() error } // Item contains the data fields necessary to insert/update a proof leaf @@ -337,6 +420,12 @@ type Item struct { // MetaReveal is the meta reveal associated with the given proof leaf. MetaReveal *proof.MetaReveal + + // LogProofSync is a boolean that indicates, if true, that the proof + // leaf sync attempt should be logged and actively managed to ensure + // that the federation push procedure is repeated in the event of a + // failure. + LogProofSync bool } // BatchRegistrar is an interface that allows a caller to register a batch of @@ -519,6 +608,9 @@ type DiffEngine interface { // of diff FetchProofLeaf(ctx context.Context, id Identifier, key LeafKey) ([]*Proof, error) + + // Close is used to shutdown the active diff engine instance. + Close() error } // Commitment is an on chain universe commitment. This includes the merkle @@ -697,10 +789,113 @@ type FederationSyncConfigDB interface { uniSyncConfigs []*FedUniSyncConfig) error } -// FederationDB is used for CRUD operations related to federation sync config -// and tracked servers. +// SyncDirection is the direction of a proof sync. +type SyncDirection string + +const ( + // SyncDirectionPush indicates that the sync is a push sync (from the local + // server to the remote server). + SyncDirectionPush SyncDirection = "push" + + // SyncDirectionPull indicates that the sync is a pull sync (from the remote + // server to the local server). + SyncDirectionPull SyncDirection = "pull" +) + +// ParseStrSyncDirection parses a string into a SyncDirection. +func ParseStrSyncDirection(s string) (SyncDirection, error) { + switch s { + case string(SyncDirectionPush): + return SyncDirectionPush, nil + case string(SyncDirectionPull): + return SyncDirectionPull, nil + default: + return "", fmt.Errorf("unknown sync direction: %v", s) + } +} + +// ProofSyncStatus is the status of a proof sync. +type ProofSyncStatus string + +const ( + // ProofSyncStatusPending indicates that the sync is pending. + ProofSyncStatusPending ProofSyncStatus = "pending" + + // ProofSyncStatusComplete indicates that the sync is complete. + ProofSyncStatusComplete ProofSyncStatus = "complete" +) + +// ParseStrProofSyncStatus parses a string into a ProofSyncStatus. +func ParseStrProofSyncStatus(s string) (ProofSyncStatus, error) { + switch s { + case string(ProofSyncStatusPending): + return ProofSyncStatusPending, nil + case string(ProofSyncStatusComplete): + return ProofSyncStatusComplete, nil + default: + return "", fmt.Errorf("unknown proof sync status: %v", s) + } +} + +// ProofSyncLogEntry is a log entry for a proof sync. +type ProofSyncLogEntry struct { + // Timestamp is the timestamp of the log entry. + Timestamp time.Time + + // SyncStatus is the status of the sync. + SyncStatus ProofSyncStatus + + // SyncDirection is the direction of the sync. 
+ SyncDirection SyncDirection + + // AttemptCounter is the number of times the sync has been attempted. + AttemptCounter int64 + + // ServerAddr is the address of the sync counterparty server. + ServerAddr ServerAddr + + // UniID is the identifier of the universe associated with the sync event. + UniID Identifier + + // LeafKey is the leaf key associated with the sync event. + LeafKey LeafKey + + // Leaf is the leaf associated with the sync event. + Leaf Leaf +} + +// FederationProofSyncLog is used for CRUD operations relating to the federation +// proof sync log. +type FederationProofSyncLog interface { + // UpsertFederationProofSyncLog upserts a federation proof sync log + // entry for a given universe server and proof. + UpsertFederationProofSyncLog(ctx context.Context, uniID Identifier, + leafKey LeafKey, addr ServerAddr, syncDirection SyncDirection, + syncStatus ProofSyncStatus, + bumpSyncAttemptCounter bool) (int64, error) + + // QueryFederationProofSyncLog queries the federation proof sync log and + // returns the log entries which correspond to the given universe proof + // leaf. + QueryFederationProofSyncLog(ctx context.Context, uniID Identifier, + leafKey LeafKey, syncDirection SyncDirection, + syncStatus ProofSyncStatus) ([]*ProofSyncLogEntry, error) + + // FetchPendingProofsSyncLog queries the federation proof sync log and + // returns all log entries with sync status pending. + FetchPendingProofsSyncLog(ctx context.Context, + syncDirection *SyncDirection) ([]*ProofSyncLogEntry, error) + + // DeleteProofsSyncLogEntries deletes proof sync log entries. + DeleteProofsSyncLogEntries(ctx context.Context, + servers ...ServerAddr) error +} + +// FederationDB is used for CRUD operations related to federation logs and +// configuration. type FederationDB interface { FederationLog + FederationProofSyncLog FederationSyncConfigDB } diff --git a/universe/syncer.go b/universe/syncer.go index 8b5c7ca2e..cff1894f2 100644 --- a/universe/syncer.go +++ b/universe/syncer.go @@ -461,6 +461,7 @@ func (s *SimpleSyncer) SyncUniverse(ctx context.Context, host ServerAddr, return nil, fmt.Errorf("unable to create remote diff "+ "engine: %w", err) } + defer diffEngine.Close() // With the engine created, we can now sync the local Universe with the // remote instance. diff --git a/universe_rpc_diff.go b/universe_rpc_diff.go index e1994e105..cfac22a6e 100644 --- a/universe_rpc_diff.go +++ b/universe_rpc_diff.go @@ -15,7 +15,7 @@ import ( // RpcUniverseDiff is an implementation of the universe.DiffEngine interface // that uses an RPC connection to target Universe. type RpcUniverseDiff struct { - conn unirpc.UniverseClient + conn *universeClientConn } // NewRpcUniverseDiff creates a new RpcUniverseDiff instance that dials out to @@ -210,6 +210,17 @@ func (r *RpcUniverseDiff) FetchProofLeaf(ctx context.Context, return []*universe.Proof{uniProof}, nil } +// Close closes the underlying RPC connection to the remote universe server. +func (r *RpcUniverseDiff) Close() error { + if err := r.conn.Close(); err != nil { + tapdLog.Warnf("unable to close universe RPC "+ + "connection: %v", err) + return err + } + + return nil +} + // A compile time interface to ensure that RpcUniverseDiff implements the // universe.DiffEngine interface. 
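For readers less familiar with the compile-time assertion idiom used just below: assigning a typed nil pointer to a blank interface-typed variable makes the compiler verify the implementation without allocating anything at runtime. A tiny, self-contained example with hypothetical names:

package main

type greeter interface {
	Greet() string
}

type englishGreeter struct{}

func (englishGreeter) Greet() string { return "hello" }

// Fails to compile if englishGreeter ever stops satisfying greeter.
var _ greeter = (*englishGreeter)(nil)

func main() {}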
var _ universe.DiffEngine = (*RpcUniverseDiff)(nil) diff --git a/universe_rpc_registrar.go b/universe_rpc_registrar.go index 59bd3dc0b..477e2c49b 100644 --- a/universe_rpc_registrar.go +++ b/universe_rpc_registrar.go @@ -18,7 +18,7 @@ import ( // RpcUniverseRegistrar is an implementation of the universe.Registrar interface // that uses an RPC connection to target Universe. type RpcUniverseRegistrar struct { - conn unirpc.UniverseClient + conn *universeClientConn } // NewRpcUniverseRegistrar creates a new RpcUniverseRegistrar instance that @@ -115,6 +115,17 @@ func (r *RpcUniverseRegistrar) UpsertProofLeaf(ctx context.Context, return unmarshalIssuanceProof(uniKey, proofResp) } +// Close closes the underlying RPC connection to the remote Universe server. +func (r *RpcUniverseRegistrar) Close() error { + if err := r.conn.Close(); err != nil { + tapdLog.Warnf("unable to close universe RPC "+ + "connection: %v", err) + return err + } + + return nil +} + // A compile time interface to ensure that RpcUniverseRegistrar implements the // universe.Registrar interface. var _ universe.Registrar = (*RpcUniverseRegistrar)(nil) @@ -151,10 +162,18 @@ func CheckFederationServer(localRuntimeID int64, connectTimeout time.Duration, return nil } +// universeClientConn is a wrapper around a gRPC client connection that also +// includes the raw connection. This allows us to properly manage the lifecycle +// of the connection. +type universeClientConn struct { + *grpc.ClientConn + unirpc.UniverseClient +} + // ConnectUniverse connects to a remote Universe server using the provided // server address. func ConnectUniverse( - serverAddr universe.ServerAddr) (unirpc.UniverseClient, error) { + serverAddr universe.ServerAddr) (*universeClientConn, error) { // TODO(roasbeef): all info is authenticated, but also want to allow // brontide connect as well, can avoid TLS certs @@ -179,5 +198,8 @@ func ConnectUniverse( "server: %v", err) } - return unirpc.NewUniverseClient(rawConn), nil + return &universeClientConn{ + ClientConn: rawConn, + UniverseClient: unirpc.NewUniverseClient(rawConn), + }, nil } diff --git a/version.go b/version.go index d7714f433..c3e6eb1d1 100644 --- a/version.go +++ b/version.go @@ -45,11 +45,11 @@ const ( AppMinor uint = 3 // AppPatch defines the application patch for this binary. - AppPatch uint = 2 + AppPatch uint = 3 // AppPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. - AppPreRelease = "alpha" + AppPreRelease = "alpha.rc1" // defaultAgentName is the default name of the software that is added as // the first part of the user agent string.
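To round off the universeClientConn change above, here is a stand-alone sketch of the same wrapper pattern against a hypothetical gRPC service: embedding both the raw *grpc.ClientConn and the generated client lets one value satisfy the RPC client interface while still exposing Close for lifecycle management.

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// fooClient stands in for a generated gRPC client interface.
type fooClient interface {
	// ...generated RPC methods would live here.
}

// fooConn couples the generated client with the underlying connection, so
// callers can both issue RPCs and tear down the transport.
type fooConn struct {
	*grpc.ClientConn
	fooClient
}

func dialFoo(target string) (*fooConn, error) {
	rawConn, err := grpc.Dial(
		target, grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		return nil, err
	}

	return &fooConn{
		ClientConn: rawConn,
		fooClient:  nil, // e.g. foopb.NewFooClient(rawConn)
	}, nil
}

func main() {
	conn, err := dialFoo("localhost:10029")
	if err != nil {
		panic(err)
	}
	// Callers can now close the transport when done; Close is promoted
	// from the embedded *grpc.ClientConn.
	defer conn.Close()

	fmt.Println("connected (lazily); no RPCs issued in this sketch")
}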