From 5773754b4331d5a53e934f03fe575cdd53524bbc Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 4 Dec 2023 17:17:28 -0800 Subject: [PATCH 01/54] universe: fix TCP connection leak by explicitly closing gRPC conn for syncers In this commit, we attempt to fix a TCP connection leak by explicitly closing the gRPC connections we create once we're done with the relevant gRPC client. Otherwise, we'll end up making a new connection for each new asset to be pushed, which can add up. In the future, we should also look into the server-side keep alive options. --- universe/interface.go | 6 ++++++ universe/syncer.go | 2 ++ universe_rpc_diff.go | 13 ++++++++++++- universe_rpc_registrar.go | 28 +++++++++++++++++++++++++--- 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/universe/interface.go b/universe/interface.go index 5fdf19adb..9abb76878 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -321,6 +321,9 @@ type Registrar interface { // UpsertProofLeaf upserts a proof leaf within the target universe tree. UpsertProofLeaf(ctx context.Context, id Identifier, key LeafKey, leaf *Leaf) (*Proof, error) + + // Close is used to shutdown the active registrar instance. + Close() error } // Item contains the data fields necessary to insert/update a proof leaf @@ -519,6 +522,9 @@ type DiffEngine interface { // of diff FetchProofLeaf(ctx context.Context, id Identifier, key LeafKey) ([]*Proof, error) + + // Close is used to shutdown the active diff engine instance. + Close() error } // Commitment is an on chain universe commitment. This includes the merkle diff --git a/universe/syncer.go b/universe/syncer.go index 8b5c7ca2e..6d65a716d 100644 --- a/universe/syncer.go +++ b/universe/syncer.go @@ -462,6 +462,8 @@ func (s *SimpleSyncer) SyncUniverse(ctx context.Context, host ServerAddr, "engine: %w", err) } + defer diffEngine.Close() + // With the engine created, we can now sync the local Universe with the // remote instance. return s.executeSync(ctx, diffEngine, syncType, syncConfigs, idsToSync) diff --git a/universe_rpc_diff.go b/universe_rpc_diff.go index e1994e105..cfac22a6e 100644 --- a/universe_rpc_diff.go +++ b/universe_rpc_diff.go @@ -15,7 +15,7 @@ import ( // RpcUniverseDiff is an implementation of the universe.DiffEngine interface // that uses an RPC connection to target Universe. type RpcUniverseDiff struct { - conn unirpc.UniverseClient + conn *universeClientConn } // NewRpcUniverseDiff creates a new RpcUniverseDiff instance that dials out to @@ -210,6 +210,17 @@ func (r *RpcUniverseDiff) FetchProofLeaf(ctx context.Context, return []*universe.Proof{uniProof}, nil } +// Close closes the underlying RPC connection to the remote universe server. +func (r *RpcUniverseDiff) Close() error { + if err := r.conn.Close(); err != nil { + tapdLog.Warnf("unable to close universe RPC "+ + "connection: %v", err) + return err + } + + return nil +} + // A compile time interface to ensure that RpcUniverseDiff implements the // universe.DiffEngine interface. var _ universe.DiffEngine = (*RpcUniverseDiff)(nil) diff --git a/universe_rpc_registrar.go b/universe_rpc_registrar.go index 59bd3dc0b..477e2c49b 100644 --- a/universe_rpc_registrar.go +++ b/universe_rpc_registrar.go @@ -18,7 +18,7 @@ import ( // RpcUniverseRegistrar is an implementation of the universe.Registrar interface // that uses an RPC connection to target Universe. 
type RpcUniverseRegistrar struct { - conn unirpc.UniverseClient + conn *universeClientConn } // NewRpcUniverseRegistrar creates a new RpcUniverseRegistrar instance that @@ -115,6 +115,17 @@ func (r *RpcUniverseRegistrar) UpsertProofLeaf(ctx context.Context, return unmarshalIssuanceProof(uniKey, proofResp) } +// Close closes the underlying RPC connection to the remote Universe server. +func (r *RpcUniverseRegistrar) Close() error { + if err := r.conn.Close(); err != nil { + tapdLog.Warnf("unable to close universe RPC "+ + "connection: %v", err) + return err + } + + return nil +} + // A compile time interface to ensure that RpcUniverseRegistrar implements the // universe.Registrar interface. var _ universe.Registrar = (*RpcUniverseRegistrar)(nil) @@ -151,10 +162,18 @@ func CheckFederationServer(localRuntimeID int64, connectTimeout time.Duration, return nil } +// universeClientConn is a wrapper around a gRPC client connection that also +// includes the raw connection. This allows us to properly manage the lifecycle +// of the connection. +type universeClientConn struct { + *grpc.ClientConn + unirpc.UniverseClient +} + // ConnectUniverse connects to a remote Universe server using the provided // server address. func ConnectUniverse( - serverAddr universe.ServerAddr) (unirpc.UniverseClient, error) { + serverAddr universe.ServerAddr) (*universeClientConn, error) { // TODO(roasbeef): all info is authenticated, but also want to allow // brontide connect as well, can avoid TLS certs @@ -179,5 +198,8 @@ func ConnectUniverse( "server: %v", err) } - return unirpc.NewUniverseClient(rawConn), nil + return &universeClientConn{ + ClientConn: rawConn, + UniverseClient: unirpc.NewUniverseClient(rawConn), + }, nil } From cde9a05bccee157b65ee189605f5f291b56530ee Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 4 Dec 2023 17:18:11 -0800 Subject: [PATCH 02/54] proof: ensure callers always tear down couriers This is similar to the prior commit: we add a new method to allow a caller to close down a courier once they're done with it. This ensures that we'll always release the resources once we're done with them. --- proof/courier.go | 40 +++++++++++++++++++++++++++++++++++- tapfreighter/chain_porter.go | 2 ++ universe/auto_syncer.go | 5 +++++ universe/base.go | 5 +++++ 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/proof/courier.go b/proof/courier.go index ae323ee08..f161b8190 100644 --- a/proof/courier.go +++ b/proof/courier.go @@ -11,6 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" + "github.com/davecgh/go-spew/spew" "github.com/lightninglabs/lightning-node-connect/hashmailrpc" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" @@ -66,6 +67,9 @@ type Courier interface { // SetSubscribers sets the set of subscribers that will be notified // of proof courier related events. SetSubscribers(map[uint64]*fn.EventReceiver[fn.Event]) + + // Close stops the courier instance. + Close() error } // CourierAddr is a fully validated courier address (including protocol specific @@ -223,6 +227,7 @@ func (h *UniverseRpcCourierAddr) NewCourier(_ context.Context, backoffHandle: backoffHandle, transfer: cfg.TransferLog, subscribers: subscribers, + rawConn: conn, }, nil } @@ -291,11 +296,16 @@ type ProofMailbox interface { // CleanUp attempts to tear down the mailbox as specified by the passed // sid. CleanUp(ctx context.Context, sid streamID) error + + // Close closes the underlying connection to the hashmail server. 
+ Close() error
 }
 
 // HashMailBox is an implementation of the ProofMailbox interface backed by the
 // hashmailrpc.HashMailClient.
 type HashMailBox struct {
+ rawConn *grpc.ClientConn
+
 client hashmailrpc.HashMailClient
 }
 
@@ -341,7 +351,8 @@ func NewHashMailBox(courierAddr *url.URL) (*HashMailBox,
 client := hashmailrpc.NewHashMailClient(conn)
 
 return &HashMailBox{
- client: client,
+ client: client,
+ rawConn: conn,
 }, nil
 }
 
@@ -480,6 +491,11 @@ func (h *HashMailBox) CleanUp(ctx context.Context, sid streamID) error {
 return err
 }
 
+// Close closes the underlying connection to the hashmail server.
+func (h *HashMailBox) Close() error {
+ return h.rawConn.Close()
+}
+
 // A compile-time assertion to ensure that the HashMailBox meets the
 // ProofMailbox interface.
 var _ ProofMailbox = (*HashMailBox)(nil)
@@ -853,6 +869,8 @@ func (h *HashMailCourier) DeliverProof(ctx context.Context,
 
 log.Infof("Received ACK from receiver! Cleaning up mailboxes...")
 
+ defer h.Close()
+
 // Once we receive this ACK, we can clean up our mailbox and also the
 // receiver's mailbox.
 if err := h.mailbox.CleanUp(ctx, senderStreamID); err != nil {
@@ -928,6 +946,17 @@ func (h *HashMailCourier) publishSubscriberEvent(event fn.Event) {
 }
 }
 
+// Close closes the underlying connection to the hashmail server.
+func (h *HashMailCourier) Close() error {
+ if err := h.mailbox.Close(); err != nil {
+ log.Warnf("unable to close mailbox session, "+
+ "recipient=%v: %v", spew.Sdump(h.recipient), err)
+ return err
+ }
+
+ return nil
+}
+
 // BackoffWaitEvent is an event that is sent to a subscriber each time we
 // wait via the Backoff procedure before retrying to deliver a proof to the
 // receiver.
@@ -1030,6 +1059,10 @@ type UniverseRpcCourier struct {
 // the universe RPC server.
 client unirpc.UniverseClient
 
+ // rawConn is the raw connection that the courier will use to interact
+ // with the remote gRPC service.
+ rawConn *grpc.ClientConn
+
 // backoffHandle is a handle to the backoff procedure used in proof
 // delivery.
 backoffHandle *BackoffHandler
@@ -1297,6 +1330,11 @@ func (c *UniverseRpcCourier) publishSubscriberEvent(event fn.Event) {
 }
 }
 
+// Close closes the courier's connection to the remote gRPC service.
+func (c *UniverseRpcCourier) Close() error {
+ return c.rawConn.Close()
+}
+
 // A compile-time assertion to ensure the UniverseRpcCourier meets the
 // proof.Courier interface.
 var _ Courier = (*UniverseRpcCourier)(nil)
diff --git a/tapfreighter/chain_porter.go b/tapfreighter/chain_porter.go
index e5953c99f..fc73fd1d4 100644
--- a/tapfreighter/chain_porter.go
+++ b/tapfreighter/chain_porter.go
@@ -671,6 +671,8 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error {
 "service handle: %w", err)
 }
 
+ defer courier.Close()
+
 // Update courier events subscribers before attempting to
 // deliver proof.
 p.subscriberMtx.Lock()
diff --git a/universe/auto_syncer.go b/universe/auto_syncer.go
index 4bed26bae..9e88c029f 100644
--- a/universe/auto_syncer.go
+++ b/universe/auto_syncer.go
@@ -161,6 +161,11 @@ func (f *FederationEnvoy) Start() error {
 return nil
 }
 
+// Close frees up any ephemeral resources allocated by the envoy.
+func (f *FederationEnvoy) Close() error {
+ return nil
+}
+
 // Stop stops all active goroutines.
func (f *FederationEnvoy) Stop() error { f.stopOnce.Do(func() { diff --git a/universe/base.go b/universe/base.go index 11f09e7f1..309944bbc 100644 --- a/universe/base.go +++ b/universe/base.go @@ -71,6 +71,11 @@ func NewArchive(cfg ArchiveConfig) *Archive { return a } +// Close closes the archive, stopping all goroutines and freeing all resources. +func (a *Archive) Close() error { + return nil +} + // fetchUniverse returns the base universe instance for the passed identifier. // The universe will be loaded in on demand if it has not been seen before. func (a *Archive) fetchUniverse(id Identifier) BaseBackend { From 387b0524da847fdb75334694b19c2db860392663 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 4 Dec 2023 17:22:40 -0800 Subject: [PATCH 03/54] rpc: set params for the longest time a connection can be idle In this commit, we set a gRPC param that controls how long a connection can be idle for. The goal here is to prune the amount of open TCP connections on an active/popular universe server. According to the docs: > Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. --- server.go | 8 ++++++++ universe/syncer.go | 1 - 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/server.go b/server.go index fd69e1558..da1bba94c 100644 --- a/server.go +++ b/server.go @@ -7,6 +7,7 @@ import ( "strings" "sync" "sync/atomic" + "time" proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/lightninglabs/lndclient" @@ -21,6 +22,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" "gopkg.in/macaroon-bakery.v2/bakery" ) @@ -269,6 +271,12 @@ func (s *Server) RunUntilShutdown(mainErrChan <-chan error) error { serverOpts = append(serverOpts, rpcServerOpts...) serverOpts = append(serverOpts, ServerMaxMsgReceiveSize) + keepAliveParams := keepalive.ServerParameters{ + MaxConnectionIdle: time.Minute * 2, + } + + serverOpts = append(serverOpts, grpc.KeepaliveParams(keepAliveParams)) + grpcServer := grpc.NewServer(serverOpts...) 
defer grpcServer.Stop() diff --git a/universe/syncer.go b/universe/syncer.go index 6d65a716d..cff1894f2 100644 --- a/universe/syncer.go +++ b/universe/syncer.go @@ -461,7 +461,6 @@ func (s *SimpleSyncer) SyncUniverse(ctx context.Context, host ServerAddr, return nil, fmt.Errorf("unable to create remote diff "+ "engine: %w", err) } - defer diffEngine.Close() // With the engine created, we can now sync the local Universe with the From 66bd4dcab26465c02bb37b62fe786296e6c3702a Mon Sep 17 00:00:00 2001 From: George Tsagkarelis Date: Tue, 12 Dec 2023 12:44:58 +0100 Subject: [PATCH 04/54] monitoring: add basic collectors --- monitoring/asset_balances_collector.go | 114 ++++++++++++++++++++++ monitoring/asset_collector.go | 128 +++++++++++++++++++++++++ monitoring/garden_collector.go | 120 +++++++++++++++++++++++ 3 files changed, 362 insertions(+) create mode 100644 monitoring/asset_balances_collector.go create mode 100644 monitoring/asset_collector.go create mode 100644 monitoring/garden_collector.go diff --git a/monitoring/asset_balances_collector.go b/monitoring/asset_balances_collector.go new file mode 100644 index 000000000..462711c6d --- /dev/null +++ b/monitoring/asset_balances_collector.go @@ -0,0 +1,114 @@ +package monitoring + +import ( + "context" + "errors" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +// assetBalancesCollector is a Prometheus collector that exports the balances +// of all taproot assets. +type assetBalancesCollector struct { + collectMx sync.Mutex + + cfg *PrometheusConfig + registry *prometheus.Registry + + balancesVec *prometheus.GaugeVec + + utxosVec *prometheus.GaugeVec +} + +func newAssetBalancesCollector(cfg *PrometheusConfig, + registry *prometheus.Registry) (*assetBalancesCollector, error) { + + if cfg == nil { + return nil, errors.New("asset collector prometheus cfg is nil") + } + + if cfg.AssetStore == nil { + return nil, errors.New("asset collector asset store is nil") + } + + return &assetBalancesCollector{ + cfg: cfg, + registry: registry, + balancesVec: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "asset_balances", + Help: "Balances of all taproot assets", + }, + []string{"asset_name"}, + ), + utxosVec: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "utxos_assets_held", + Help: "Number of UTXOs used for taproot assets", + }, + []string{"outpoint"}, + ), + }, nil +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once the +// last descriptor has been sent. +// +// NOTE: Part of the prometheus.Collector interface. +func (a *assetBalancesCollector) Describe(ch chan<- *prometheus.Desc) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + a.balancesVec.Describe(ch) + a.utxosVec.Describe(ch) +} + +// Collect is called by the Prometheus registry when collecting metrics. +// +// NOTE: Part of the prometheus.Collector interface. 
+func (a *assetBalancesCollector) Collect(ch chan<- prometheus.Metric) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + ctxdb, cancel := context.WithTimeout(context.Background(), dbTimeout) + defer cancel() + + assets, err := a.cfg.AssetStore.FetchAllAssets(ctxdb, false, false, nil) + if err != nil { + log.Errorf("unable to fetch assets: %v", err) + return + } + + utxos, err := a.cfg.AssetStore.FetchManagedUTXOs(ctxdb) + if err != nil { + log.Errorf("unable to fetch utxos: %v", err) + return + } + + a.utxosVec.Reset() + a.balancesVec.Reset() + + utxoMap := make(map[string]prometheus.Gauge) + + for _, utxo := range utxos { + utxoOutpoint := utxo.OutPoint.String() + utxoMap[utxoOutpoint] = a.utxosVec.WithLabelValues(utxoOutpoint) + } + + for _, asset := range assets { + a.balancesVec.WithLabelValues(asset.Tag). + Set(float64(asset.Amount)) + + utxoGauge, ok := utxoMap[asset.AnchorOutpoint.String()] + if !ok { + continue + } + + utxoGauge.Inc() + } + + a.balancesVec.Collect(ch) + a.utxosVec.Collect(ch) +} diff --git a/monitoring/asset_collector.go b/monitoring/asset_collector.go new file mode 100644 index 000000000..83e608746 --- /dev/null +++ b/monitoring/asset_collector.go @@ -0,0 +1,128 @@ +package monitoring + +import ( + "context" + "errors" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + numAssetsMintedMetric = "num_assets_minted" + + numTotalGroupsMetric = "num_total_groups" + + numTotalSyncsMetric = "num_total_syncs" + + numTotalProofsMetric = "num_total_proofs" +) + +// universeStatsCollector is a Prometheus collector that exports the stats of +// the universe. +type universeStatsCollector struct { + collectMx sync.Mutex + + cfg *PrometheusConfig + registry *prometheus.Registry + + gauges map[string]prometheus.Gauge +} + +func newUniverseStatsCollector(cfg *PrometheusConfig, + registry *prometheus.Registry) (*universeStatsCollector, error) { + + if cfg == nil { + return nil, errors.New("universe stats collector prometheus " + + "cfg is nil") + } + + if cfg.UniverseStats == nil { + return nil, errors.New("universe stats collector universe " + + "stats is nil") + } + + gaugesMap := map[string]prometheus.Gauge{ + numAssetsMintedMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numAssetsMintedMetric, + Help: "Total number of assets minted", + }, + ), + numTotalGroupsMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numTotalGroupsMetric, + Help: "Total number of groups", + }, + ), + numTotalSyncsMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numTotalSyncsMetric, + Help: "Total number of syncs", + }, + ), + numTotalProofsMetric: prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: numTotalProofsMetric, + Help: "Total number of proofs", + }, + ), + } + + return &universeStatsCollector{ + cfg: cfg, + registry: registry, + gauges: gaugesMap, + }, nil +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once the +// last descriptor has been sent. +// +// NOTE: Part of the prometheus.Collector interface. +func (a *universeStatsCollector) Describe(ch chan<- *prometheus.Desc) { + a.collectMx.Lock() + defer a.collectMx.Unlock() + + for _, gauge := range a.gauges { + gauge.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting metrics. +// +// NOTE: Part of the prometheus.Collector interface. 
+func (a *universeStatsCollector) Collect(ch chan<- prometheus.Metric) {
+ a.collectMx.Lock()
+ defer a.collectMx.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), dbTimeout)
+ defer cancel()
+
+ universeStats, err := a.cfg.UniverseStats.AggregateSyncStats(ctx)
+ if err != nil {
+ log.Errorf("unable to get aggregate universe stats: %v", err)
+ return
+ }
+
+ a.gauges[numAssetsMintedMetric].Set(
+ float64(universeStats.NumTotalAssets),
+ )
+
+ a.gauges[numTotalGroupsMetric].Set(
+ float64(universeStats.NumTotalGroups),
+ )
+
+ a.gauges[numTotalSyncsMetric].Set(
+ float64(universeStats.NumTotalSyncs),
+ )
+
+ a.gauges[numTotalProofsMetric].Set(
+ float64(universeStats.NumTotalProofs),
+ )
+
+ for _, gauge := range a.gauges {
+ gauge.Collect(ch)
+ }
+}
diff --git a/monitoring/garden_collector.go b/monitoring/garden_collector.go
new file mode 100644
index 000000000..c37e96e83
--- /dev/null
+++ b/monitoring/garden_collector.go
@@ -0,0 +1,120 @@
+package monitoring
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/lightninglabs/taproot-assets/tapgarden"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// gardenCollector is a Prometheus collector that exports metrics related to
+// asset minting batches (pending and completed mint batches).
+type gardenCollector struct {
+ collectMx sync.Mutex
+
+ cfg *PrometheusConfig
+ registry *prometheus.Registry
+
+ pendingBatches *prometheus.GaugeVec
+ completedBatches prometheus.Gauge
+}
+
+func newGardenCollector(cfg *PrometheusConfig,
+ registry *prometheus.Registry) (*gardenCollector, error) {
+
+ if cfg == nil {
+ return nil, errors.New("garden collector prometheus cfg is nil")
+ }
+
+ if cfg.AssetStore == nil {
+ return nil, errors.New("garden collector asset store is nil")
+ }
+
+ return &gardenCollector{
+ cfg: cfg,
+ registry: registry,
+ pendingBatches: prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: "mint_batches",
+ Help: "Batched mint transactions",
+ },
+ []string{"batch_pubkey"},
+ ),
+ completedBatches: prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Name: "completed_batches",
+ Help: "Total number of completed mint batches",
+ },
+ ),
+ }, nil
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once the
+// last descriptor has been sent.
+//
+// NOTE: Part of the prometheus.Collector interface.
+func (a *gardenCollector) Describe(ch chan<- *prometheus.Desc) {
+ a.collectMx.Lock()
+ defer a.collectMx.Unlock()
+
+ a.pendingBatches.Describe(ch)
+ a.completedBatches.Describe(ch)
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+//
+// NOTE: Part of the prometheus.Collector interface.
+func (a *gardenCollector) Collect(ch chan<- prometheus.Metric) {
+ a.collectMx.Lock()
+ defer a.collectMx.Unlock()
+
+ a.completedBatches.Set(0)
+
+ // Get the number of pending batches.
+ batches, err := a.cfg.AssetMinter.ListBatches(nil) + if err != nil { + log.Errorf("unable to list batches: %v", err) + return + } + + completed := 0 + + for _, batch := range batches { + state := batch.State() + + switch { + case state == tapgarden.BatchStatePending || + state == tapgarden.BatchStateFrozen || + state == tapgarden.BatchStateCommitted || + state == tapgarden.BatchStateBroadcast || + state == tapgarden.BatchStateConfirmed: + + if state == tapgarden.BatchStatePending { + a.pendingBatches.WithLabelValues( + batch.BatchKey.PubKey.X().String(), + ).Set( + float64(len(batch.Seedlings)), + ) + } + + case state == tapgarden.BatchStateFinalized || + state == tapgarden.BatchStateSeedlingCancelled || + state == tapgarden.BatchStateSproutCancelled: + + a.pendingBatches.DeleteLabelValues( + batch.BatchKey.PubKey.X().String(), + ) + + if state == tapgarden.BatchStateFinalized { + completed += 1 + } + } + } + + a.completedBatches.Set(float64(completed)) + + a.pendingBatches.Collect(ch) + a.completedBatches.Collect(ch) +} From b3ca015dac4e341082be235749e7d4954ecc7ad6 Mon Sep 17 00:00:00 2001 From: George Tsagkarelis Date: Tue, 12 Dec 2023 12:45:55 +0100 Subject: [PATCH 05/54] monitoring+tapd: register collectors to prometheus --- monitoring/config.go | 19 ++++++- monitoring/interface.go | 29 ---------- monitoring/prometheus.go | 120 ++++++++++++--------------------------- server.go | 16 +++++- 4 files changed, 66 insertions(+), 118 deletions(-) delete mode 100644 monitoring/interface.go diff --git a/monitoring/config.go b/monitoring/config.go index d7f7c0b77..53dd37381 100644 --- a/monitoring/config.go +++ b/monitoring/config.go @@ -1,6 +1,11 @@ package monitoring -import "google.golang.org/grpc" +import ( + "github.com/lightninglabs/taproot-assets/tapdb" + "github.com/lightninglabs/taproot-assets/tapgarden" + "github.com/lightninglabs/taproot-assets/universe" + "google.golang.org/grpc" +) // PrometheusConfig is the set of configuration data that specifies if // Prometheus metric exporting is activated, and if so the listening address of @@ -17,6 +22,18 @@ type PrometheusConfig struct { // generic RPC metrics to monitor the health of the service. RPCServer *grpc.Server + // UniverseStats is used to collect any stats that are relevant to the + // universe. + UniverseStats universe.Telemetry + + // AssetStore is used to collect any stats that are relevant to the + // asset store. + AssetStore *tapdb.AssetStore + + // AssetMinter is used to collect any stats that are relevant to the + // asset minter. + AssetMinter tapgarden.Planter + // PerfHistograms indicates if the additional histogram information for // latency, and handling time of gRPC calls should be enabled. This // generates additional data, and consume more memory for the diff --git a/monitoring/interface.go b/monitoring/interface.go deleted file mode 100644 index 0b9151315..000000000 --- a/monitoring/interface.go +++ /dev/null @@ -1,29 +0,0 @@ -package monitoring - -import "github.com/prometheus/client_golang/prometheus" - -// metricGroupFactory is a factory method that given the primary prometheus -// config, will create a new MetricGroup that will be managed by the main -// PrometheusExporter. -type metricGroupFactory func(*PrometheusConfig) (MetricGroup, error) - -// MetricGroup is the primary interface of this package. 
The main exporter (in -// this case the PrometheusExporter), will manage these directly, ensuring that -// all MetricGroups are registered before the main prometheus exporter starts -// and any additional tracing is added. -type MetricGroup interface { - // Collector is the embedded interface that forces every MetricGroup to - // also be a collector. - prometheus.Collector - - // Name is the name of the metric group. When exported to prometheus, - // it's expected that all metric under this group have the same prefix. - Name() string - - // RegisterMetricFuncs signals to the underlying hybrid collector that - // it should register all metrics that it aims to export with the - // global Prometheus registry. Rather than using the series of - // "MustRegister" directives, implementers of this interface should - // instead propagate back any errors related to metric registration. - RegisterMetricFuncs() error -} diff --git a/monitoring/prometheus.go b/monitoring/prometheus.go index c19dd647b..b352e65f7 100644 --- a/monitoring/prometheus.go +++ b/monitoring/prometheus.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "net/http" - "sync" "time" //nolint:lll @@ -15,35 +14,29 @@ import ( ) var ( - // metricGroups is a global variable of all registered metrics - // projected by the mutex below. All new MetricGroups should add - // themselves to this map within the init() method of their file. - metricGroups = make(map[string]metricGroupFactory) - - // activeGroups is a global map of all active metric groups. This can - // be used by some of the "static' package level methods to look up the - // target metric group to export observations. - activeGroups = make(map[string]MetricGroup) - - // metricsMtx is a global mutex that should be held when accessing the - // global maps. - metricsMtx sync.Mutex - // serverMetrics is a global variable that holds the Prometheus metrics // for the gRPC server. serverMetrics *grpc_prometheus.ServerMetrics ) +const ( + // dbTimeout is the default database timeout. + dbTimeout = 20 * time.Second +) + // PrometheusExporter is a metric exporter that uses Prometheus directly. The // internal server will interact with this struct in order to export relevant // metrics. type PrometheusExporter struct { - config *PrometheusConfig + config *PrometheusConfig + registry *prometheus.Registry } // Start registers all relevant metrics with the Prometheus library, then // launches the HTTP server that Prometheus will hit to scrape our metrics. func (p *PrometheusExporter) Start() error { + log.Infof("Starting Prometheus Exporter") + // If we're not active, then there's nothing more to do. if !p.config.Active { return nil @@ -54,28 +47,43 @@ func (p *PrometheusExporter) Start() error { return fmt.Errorf("server metrics not set") } - reg := prometheus.NewRegistry() - reg.MustRegister(collectors.NewProcessCollector( + // Create a custom Prometheus registry. + p.registry = prometheus.NewRegistry() + p.registry.MustRegister(collectors.NewProcessCollector( collectors.ProcessCollectorOpts{}, )) - reg.MustRegister(collectors.NewGoCollector()) - reg.MustRegister(serverMetrics) + p.registry.MustRegister(collectors.NewGoCollector()) + p.registry.MustRegister(serverMetrics) - // Make ensure that all metrics exist when collecting and querying. 
- serverMetrics.InitializeMetrics(p.config.RPCServer)
 
- // Next, we'll attempt to register all our metrics. If we fail to
- // register ANY metric, then we'll fail all together.
- if err := p.registerMetrics(); err != nil {
+ uniStatsCollector, err := newUniverseStatsCollector(p.config, p.registry)
+ if err != nil {
 return err
 }
+ p.registry.MustRegister(uniStatsCollector)
+
+ assetBalancesCollector, err :=
+ newAssetBalancesCollector(p.config, p.registry)
+ if err != nil {
+ return err
+ }
+ p.registry.MustRegister(assetBalancesCollector)
+
+ gardenCollector, err := newGardenCollector(p.config, p.registry)
+ if err != nil {
+ return err
+ }
+ p.registry.MustRegister(gardenCollector)
+
+ // Ensure that all metrics exist when collecting and querying.
+ serverMetrics.InitializeMetrics(p.config.RPCServer)
 
 // Finally, we'll launch the HTTP server that Prometheus will use to
- // scape our metrics.
+ // scrape our metrics.
 go func() {
+ // Use our custom prometheus registry.
 promMux := http.NewServeMux()
 promMux.Handle("/metrics", promhttp.HandlerFor(
- reg, promhttp.HandlerOpts{
+ p.registry, promhttp.HandlerOpts{
 EnableOpenMetrics: true,
 MaxRequestsInFlight: 1,
 }),
@@ -98,61 +106,3 @@ func (p *PrometheusExporter) Start() error {
 
 return nil
 }
-
-// registerMetrics iterates through all the registered metric groups and
-// attempts to register each one. If any of the MetricGroups fail to register,
-// then an error will be returned.
-func (p *PrometheusExporter) registerMetrics() error {
- metricsMtx.Lock()
- defer metricsMtx.Unlock()
-
- for _, metricGroupFunc := range metricGroups {
- metricGroup, err := metricGroupFunc(p.config)
- if err != nil {
- return err
- }
-
- if err := metricGroup.RegisterMetricFuncs(); err != nil {
- return err
- }
-
- activeGroups[metricGroup.Name()] = metricGroup
- }
-
- return nil
-}
-
-// gauges is a map type that maps a gauge to its unique name.
-type gauges map[string]*prometheus.GaugeVec // nolint:unused
-
-// addGauge adds a new gauge vector to the map.
-func (g gauges) addGauge(name, help string, labels []string) { // nolint:unused
- g[name] = prometheus.NewGaugeVec(
- prometheus.GaugeOpts{
- Name: name,
- Help: help,
- },
- labels,
- )
-}
-
-// describe describes all gauges contained in the map to the given channel.
-func (g gauges) describe(ch chan<- *prometheus.Desc) { // nolint:unused
- for _, gauge := range g {
- gauge.Describe(ch)
- }
-}
-
-// collect collects all metrics of the map's gauges to the given channel.
-func (g gauges) collect(ch chan<- prometheus.Metric) { // nolint:unused
- for _, gauge := range g {
- gauge.Collect(ch)
- }
-}
-
-// reset resets all gauges in the map.
-func (g gauges) reset() { // nolint:unused
- for _, gauge := range g {
- gauge.Reset()
- }
-}
diff --git a/server.go b/server.go
index da1bba94c..73d03af5b 100644
--- a/server.go
+++ b/server.go
@@ -321,6 +321,16 @@ func (s *Server) RunUntilShutdown(mainErrChan <-chan error) error {
 // configuration.
 s.cfg.Prometheus.RPCServer = grpcServer
 
+ // Provide Prometheus collectors with access to Universe stats.
+ s.cfg.Prometheus.UniverseStats = s.cfg.UniverseStats
+
+ // Provide Prometheus collectors with access to the asset store.
+ s.cfg.Prometheus.AssetStore = s.cfg.AssetStore
+
+ // Provide Prometheus collectors with access to the asset
+ // minter.
+ s.cfg.Prometheus.AssetMinter = s.cfg.AssetMinter + promExporter, err := monitoring.NewPrometheusExporter( &s.cfg.Prometheus, ) @@ -329,13 +339,13 @@ func (s *Server) RunUntilShutdown(mainErrChan <-chan error) error { err) } - srvrLog.Infof("Prometheus exporter server listening on %v", - s.cfg.Prometheus.ListenAddr) - if err := promExporter.Start(); err != nil { return mkErr("Unable to start prometheus exporter: %v", err) } + + srvrLog.Infof("Prometheus exporter server listening on %v", + s.cfg.Prometheus.ListenAddr) } srvrLog.Infof("Taproot Asset Daemon fully active!") From e8ed06bfbd585f3bbf393baeffebec39619f9241 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 26 Jan 2024 13:41:15 -0800 Subject: [PATCH 06/54] build: bump version v0.3.3 --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index d7714f433..2bb292896 100644 --- a/version.go +++ b/version.go @@ -45,7 +45,7 @@ const ( AppMinor uint = 3 // AppPatch defines the application patch for this binary. - AppPatch uint = 2 + AppPatch uint = 3 // AppPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. From 98d2142905396d63ba0e549fb3b30ad61f37c488 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 30 Nov 2023 17:56:18 -0600 Subject: [PATCH 07/54] multi: add more context to errors --- proof/verifier.go | 2 +- tapgarden/caretaker.go | 8 +++++--- universe/base.go | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/proof/verifier.go b/proof/verifier.go index 098a1ac17..02deae762 100644 --- a/proof/verifier.go +++ b/proof/verifier.go @@ -331,7 +331,7 @@ func (p *Proof) verfyGenesisGroupKey(groupVerifier GroupVerifier) error { groupKey := p.Asset.GroupKey.GroupPubKey err := groupVerifier(&groupKey) if err != nil { - return ErrGroupKeyUnknown + return fmt.Errorf("%w: %v", ErrGroupKeyUnknown, err) } return nil diff --git a/tapgarden/caretaker.go b/tapgarden/caretaker.go index d60c22dbb..de90b6c04 100644 --- a/tapgarden/caretaker.go +++ b/tapgarden/caretaker.go @@ -1364,8 +1364,8 @@ func GenGroupVerifier(ctx context.Context, ctx, groupKey, ) if err != nil { - return fmt.Errorf("%x: %w", assetGroupKey, - ErrGroupKeyUnknown) + return fmt.Errorf("%x: group verifier: %v: %w", + assetGroupKey[:], err, ErrGroupKeyUnknown) } _, _ = assetGroups.Put(assetGroupKey, emptyCacheVal{}) @@ -1394,7 +1394,9 @@ func GenGroupAnchorVerifier(ctx context.Context, ctx, &groupKey.GroupPubKey, ) if err != nil { - return ErrGroupKeyUnknown + return fmt.Errorf("%x: group anchor verifier: "+ + "%w", assetGroupKey[:], + ErrGroupKeyUnknown) } groupAnchor = newSingleValue(storedGroup.Genesis) diff --git a/universe/base.go b/universe/base.go index 309944bbc..03aadf95f 100644 --- a/universe/base.go +++ b/universe/base.go @@ -225,7 +225,7 @@ func (a *Archive) UpsertProofLeaf(ctx context.Context, id Identifier, ctx, id, key, &newProof, prevAssetSnapshot, ) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to verify proof: %w", err) } // Now that we know the proof is valid, we'll insert it into the base From 5c3899398ab83a7a5c38105692850c726416b268 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 30 Nov 2023 17:56:43 -0600 Subject: [PATCH 08/54] multi: fix typos and formatting --- itest/assertions.go | 2 +- itest/universe_test.go | 4 +--- proof/verifier.go | 6 +++--- tapgarden/caretaker.go | 16 ++++++---------- 4 files changed, 11 insertions(+), 17 deletions(-) diff --git a/itest/assertions.go b/itest/assertions.go index 
359ac508e..048a397ec 100644 --- a/itest/assertions.go +++ b/itest/assertions.go @@ -870,7 +870,7 @@ func AssertBalanceByID(t *testing.T, client taprpc.TaprootAssetsClient, } require.True(t, ok) - require.Equal(t, uint64(amt), uint64(balance.Balance)) + require.Equal(t, amt, balance.Balance) } // AssertBalanceByGroup asserts that the balance of a single asset group diff --git a/itest/universe_test.go b/itest/universe_test.go index f4340aeab..57597e273 100644 --- a/itest/universe_test.go +++ b/itest/universe_test.go @@ -11,17 +11,15 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2/schnorr" - "github.com/lightninglabs/taproot-assets/internal/test" - tap "github.com/lightninglabs/taproot-assets" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/internal/test" "github.com/lightninglabs/taproot-assets/mssmt" "github.com/lightninglabs/taproot-assets/taprpc" "github.com/lightninglabs/taproot-assets/taprpc/mintrpc" unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc" "github.com/lightninglabs/taproot-assets/universe" - "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" diff --git a/proof/verifier.go b/proof/verifier.go index 02deae762..18c3e9b81 100644 --- a/proof/verifier.go +++ b/proof/verifier.go @@ -327,7 +327,7 @@ func (p *Proof) verifyGenesisReveal() error { // verifyGenesisGroupKey verifies that the group key attached to the asset in // this proof has already been verified. -func (p *Proof) verfyGenesisGroupKey(groupVerifier GroupVerifier) error { +func (p *Proof) verifyGenesisGroupKey(groupVerifier GroupVerifier) error { groupKey := p.Asset.GroupKey.GroupPubKey err := groupVerifier(&groupKey) if err != nil { @@ -487,7 +487,7 @@ func (p *Proof) Verify(ctx context.Context, prev *AssetSnapshot, case isGenesisAsset && hasGroupKey && !hasGroupKeyReveal: // A reissuance must be for an asset group that has already // been imported and verified. - if err := p.verfyGenesisGroupKey(groupVerifier); err != nil { + if err := p.verifyGenesisGroupKey(groupVerifier); err != nil { return nil, err } @@ -500,7 +500,7 @@ func (p *Proof) Verify(ctx context.Context, prev *AssetSnapshot, // 7. Verify group key for asset transfers. Any asset with a group key // must carry a group key that has already been imported and verified. if !isGenesisAsset && hasGroupKey { - if err := p.verfyGenesisGroupKey(groupVerifier); err != nil { + if err := p.verifyGenesisGroupKey(groupVerifier); err != nil { return nil, err } } diff --git a/tapgarden/caretaker.go b/tapgarden/caretaker.go index de90b6c04..fb1c1c65a 100644 --- a/tapgarden/caretaker.go +++ b/tapgarden/caretaker.go @@ -17,7 +17,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/lightninglabs/neutrino/cache/lru" - "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/commitment" "github.com/lightninglabs/taproot-assets/fn" @@ -1336,14 +1335,13 @@ func newSingleValue[T any](v T) singleCacheValue[T] { // is used more as a set. type emptyCacheVal = singleCacheValue[emptyVal] -// GenGroupVeifier generates a group key verification callback function given a +// GenGroupVerifier generates a group key verification callback function given a // DB handle. func GenGroupVerifier(ctx context.Context, mintingStore MintingStore) func(*btcec.PublicKey) error { // Cache known group keys that were previously fetched. 
- assetGroups := lru.NewCache[ - asset.SerializedKey, emptyCacheVal]( + assetGroups := lru.NewCache[asset.SerializedKey, emptyCacheVal]( assetGroupCacheSize, ) @@ -1360,9 +1358,7 @@ func GenGroupVerifier(ctx context.Context, // This query will err if no stored group has a matching // tweaked group key. - _, err = mintingStore.FetchGroupByGroupKey( - ctx, groupKey, - ) + _, err = mintingStore.FetchGroupByGroupKey(ctx, groupKey) if err != nil { return fmt.Errorf("%x: group verifier: %v: %w", assetGroupKey[:], err, ErrGroupKeyUnknown) @@ -1377,12 +1373,12 @@ func GenGroupVerifier(ctx context.Context, // GenGroupAnchorVerifier generates a caching group anchor verification // callback function given a DB handle. func GenGroupAnchorVerifier(ctx context.Context, - mintingStore MintingStore) func(*asset.Genesis, - *asset.GroupKey) error { + mintingStore MintingStore) func(*asset.Genesis, *asset.GroupKey) error { // Cache anchors for groups that were previously fetched. groupAnchors := lru.NewCache[ - asset.SerializedKey, singleCacheValue[*asset.Genesis]]( + asset.SerializedKey, singleCacheValue[*asset.Genesis], + ]( assetGroupCacheSize, ) From bf92cb9817e8d6de75c3299f0d341eb619d4b2fe Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 30 Nov 2023 17:57:01 -0600 Subject: [PATCH 09/54] tapfreighter: don't use proof courier for pre-signed parcel Since a pre-signed parcel (a parcel that uses the RPC driven vPSBT flow) doesn't have proof courier URLs (they aren't part of the vPSBT), the proofs must always be delivered in an interactive manner from sender to receiver and we don't even need to attempt to use a proof courier. --- tapfreighter/chain_porter.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tapfreighter/chain_porter.go b/tapfreighter/chain_porter.go index fc73fd1d4..8295567b0 100644 --- a/tapfreighter/chain_porter.go +++ b/tapfreighter/chain_porter.go @@ -701,8 +701,14 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { } // If we have a proof courier instance active, then we'll launch several - // goroutines to deliver the proof(s) to the receiver(s). - if p.cfg.ProofCourierCfg != nil { + // goroutines to deliver the proof(s) to the receiver(s). Since a + // pre-signed parcel (a parcel that uses the RPC driven vPSBT flow) + // doesn't have proof courier URLs (they aren't part of the vPSBT), the + // proofs must always be delivered in an interactive manner from sender + // to receiver, and we don't even need to attempt to use a proof + // courier. + _, isPreSigned := pkg.Parcel.(*PreSignedParcel) + if p.cfg.ProofCourierCfg != nil && !isPreSigned { ctx, cancel := p.WithCtxQuitNoTimeout() defer cancel() From 9e0baefe1189d3553660b5689e66b52530bdcfba Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 30 Nov 2023 18:05:07 -0600 Subject: [PATCH 10/54] itest: use universe RPC proof courier by default With this commit we remove the use of the proof.DisabledCourier courier by default and instead use the universe RPC proof courier if nothing is specified in the integration test case list. --- itest/test_harness.go | 7 +++---- itest/test_list_on_test.go | 34 +++++++++++++--------------------- 2 files changed, 16 insertions(+), 25 deletions(-) diff --git a/itest/test_harness.go b/itest/test_harness.go index feed49d23..993fbafa5 100644 --- a/itest/test_harness.go +++ b/itest/test_harness.go @@ -275,15 +275,14 @@ func setupHarnesses(t *testing.T, ht *harnessTest, // courier service and attach to test harness. 
var proofCourier proof.CourierHarness switch proofCourierType { - case proof.DisabledCourier: - // Proof courier disabled, do nothing. - case proof.HashmailCourierType: port := nextAvailablePort() apHarness := NewApertureHarness(ht.t, port) proofCourier = &apHarness - case proof.UniverseRpcCourierType: + // If nothing is specified, we use the universe RPC proof courier by + // default. + default: proofCourier = NewUniverseRPCHarness(t, ht, lndHarness.Bob) } diff --git a/itest/test_list_on_test.go b/itest/test_list_on_test.go index 841db91a1..9f0297038 100644 --- a/itest/test_list_on_test.go +++ b/itest/test_list_on_test.go @@ -2,9 +2,7 @@ package itest -import ( - "github.com/lightninglabs/taproot-assets/proof" -) +import "github.com/lightninglabs/taproot-assets/proof" var testCases = []*testCase{ { @@ -48,14 +46,12 @@ var testCases = []*testCase{ proofCourierType: proof.HashmailCourierType, }, { - name: "basic send universerpc proof courier", - test: testBasicSendUnidirectional, - proofCourierType: proof.UniverseRpcCourierType, + name: "basic send unidirectional", + test: testBasicSendUnidirectional, }, { - name: "restart receiver check balance", - test: testRestartReceiverCheckBalance, - proofCourierType: proof.UniverseRpcCourierType, + name: "restart receiver check balance", + test: testRestartReceiverCheckBalance, }, { name: "resume pending package send", @@ -68,14 +64,12 @@ var testCases = []*testCase{ proofCourierType: proof.HashmailCourierType, }, { - name: "reattempt failed send uni courier", - test: testReattemptFailedSendUniCourier, - proofCourierType: proof.UniverseRpcCourierType, + name: "reattempt failed send uni courier", + test: testReattemptFailedSendUniCourier, }, { - name: "reattempt failed receive uni courier", - test: testReattemptFailedReceiveUniCourier, - proofCourierType: proof.UniverseRpcCourierType, + name: "reattempt failed receive uni courier", + test: testReattemptFailedReceiveUniCourier, }, { name: "offline receiver eventually receives", @@ -109,14 +103,12 @@ var testCases = []*testCase{ proofCourierType: proof.HashmailCourierType, }, { - name: "collectible send rpc courier", - test: testCollectibleSend, - proofCourierType: proof.UniverseRpcCourierType, + name: "collectible send", + test: testCollectibleSend, }, { - name: "collectible group send rpc courier", - test: testCollectibleGroupSend, - proofCourierType: proof.UniverseRpcCourierType, + name: "collectible group send", + test: testCollectibleGroupSend, }, { name: "re-issuance", From 9a7637169b6ecab29ed23d1c920e6fff020b2b79 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 30 Nov 2023 18:07:49 -0600 Subject: [PATCH 11/54] itest: merge server harness and UniverseRPCHarness The original server harness was meant to be the actual universe RPC server from the beginning. So we can merge the two into a single one and start/use that by default. 
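To make the swap easier to follow, here is a rough sketch of the contract both
harnesses satisfy. This is an approximation inferred from the Start/Stop
methods and the compile-time assertions in this series; the authoritative
definition of proof.CourierHarness lives in the proof package:

	// CourierHarness is roughly the shape of the interface that both the
	// aperture harness and the universe server harness implement. Sketch
	// only, not the canonical definition.
	type CourierHarness interface {
		// Start launches the harness; errChan may receive
		// asynchronous errors from the running service.
		Start(errChan chan error) error

		// Stop tears the harness down.
		Stop() error
	}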
--- itest/aperture_harness.go | 4 +- itest/integration_test.go | 13 ++-- itest/server_harness.go | 108 ------------------------------- itest/tapd_harness.go | 2 +- itest/test_harness.go | 56 ++++++++-------- itest/universe_server_harness.go | 48 ++++++++++++++ itest/universerpc_harness.go | 53 --------------- 7 files changed, 85 insertions(+), 199 deletions(-) delete mode 100644 itest/server_harness.go create mode 100644 itest/universe_server_harness.go delete mode 100644 itest/universerpc_harness.go diff --git a/itest/aperture_harness.go b/itest/aperture_harness.go index 95500363a..e95061e97 100644 --- a/itest/aperture_harness.go +++ b/itest/aperture_harness.go @@ -27,7 +27,7 @@ type ApertureHarness struct { // NewApertureHarness creates a new instance of the aperture service. It returns // a harness which includes useful values for testing. -func NewApertureHarness(t *testing.T, port int) ApertureHarness { +func NewApertureHarness(t *testing.T, port int) *ApertureHarness { // Create a temporary directory for the aperture service to use. baseDir := filepath.Join(t.TempDir(), "aperture") err := os.MkdirAll(baseDir, os.ModePerm) @@ -55,7 +55,7 @@ func NewApertureHarness(t *testing.T, port int) ApertureHarness { } service := aperture.NewAperture(cfg) - return ApertureHarness{ + return &ApertureHarness{ ListenAddr: listenAddr, Service: service, } diff --git a/itest/integration_test.go b/itest/integration_test.go index 41dcf7f81..3f2557c21 100644 --- a/itest/integration_test.go +++ b/itest/integration_test.go @@ -59,11 +59,10 @@ func TestTaprootAssetsDaemon(t *testing.T) { // The universe server and tapd client are both freshly // created and later discarded for each test run to // assure no state is taken over between runs. - tapdHarness, universeServer, proofCourier := - setupHarnesses( - t1, ht, lndHarness, - testCase.proofCourierType, - ) + tapdHarness, uniHarness, proofCourier := setupHarnesses( + t1, ht, lndHarness, + testCase.proofCourierType, + ) lndHarness.EnsureConnected( lndHarness.Alice, lndHarness.Bob, ) @@ -72,8 +71,8 @@ func TestTaprootAssetsDaemon(t *testing.T) { lndHarness.Bob.AddToLogf(logLine) ht := ht.newHarnessTest( - t1, lndHarness, universeServer, - tapdHarness, proofCourier, + t1, lndHarness, uniHarness, tapdHarness, + proofCourier, ) // Now we have everything to run the test case. diff --git a/itest/server_harness.go b/itest/server_harness.go deleted file mode 100644 index 6fee73485..000000000 --- a/itest/server_harness.go +++ /dev/null @@ -1,108 +0,0 @@ -package itest - -import ( - "fmt" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/lightningnetwork/lnd/cert" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -const ( - // DefaultAutogenValidity is the default validity of a self-signed - // certificate. The value corresponds to 14 months - // (14 months * 30 days * 24 hours). - DefaultAutogenValidity = 14 * 30 * 24 * time.Hour -) - -type universeServerMock struct { - // TODO: Embed Unimplemented*Server of universe RPCs here to mock them. 
-} - -type serverHarness struct { - serverHost string - mockServer *grpc.Server - - certFile string - server *universeServerMock - - errChan chan error - - wg sync.WaitGroup -} - -func newServerHarness(serverHost string) *serverHarness { - return &serverHarness{ - serverHost: serverHost, - errChan: make(chan error, 1), - } -} - -func (s *serverHarness) stop() { - s.mockServer.Stop() - s.wg.Wait() -} - -func (s *serverHarness) start() error { - tempDirName, err := os.MkdirTemp("", "tapitest") - if err != nil { - return err - } - - s.certFile = filepath.Join(tempDirName, "proxy.cert") - keyFile := filepath.Join(tempDirName, "proxy.key") - creds, err := genCertPair(s.certFile, keyFile) - if err != nil { - return err - } - - httpListener, err := net.Listen("tcp", s.serverHost) - if err != nil { - return err - } - - s.mockServer = grpc.NewServer(grpc.Creds(creds)) - s.server = &universeServerMock{} - - // TODO(guggero): Register universe RPC servers here. - - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.errChan <- s.mockServer.Serve(httpListener) - }() - - return nil -} - -// genCertPair generates a pair of private key and certificate and returns them -// in different formats needed to spin up test servers and clients. -func genCertPair(certFile, keyFile string) (credentials.TransportCredentials, - error) { - - certBytes, keyBytes, err := cert.GenCertPair( - "itest autogenerated cert", nil, nil, false, - DefaultAutogenValidity, - ) - if err != nil { - return nil, fmt.Errorf("unable to generate cert pair: %v", err) - } - - // Now that we have the certificate and key, we'll store them - // to the file system. - err = cert.WriteCertPair(certFile, keyFile, certBytes, keyBytes) - if err != nil { - return nil, fmt.Errorf("error writing cert pair: %w", err) - } - - creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) - if err != nil { - return nil, fmt.Errorf("unable to load cert file: %v", err) - } - return creds, nil -} diff --git a/itest/tapd_harness.go b/itest/tapd_harness.go index c73bb1fe4..398415650 100644 --- a/itest/tapd_harness.go +++ b/itest/tapd_harness.go @@ -213,7 +213,7 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, typedProofCourier.ListenAddr, ) - case *UniverseRPCHarness: + case *universeServerHarness: finalCfg.DefaultProofCourierAddr = fmt.Sprintf( "%s://%s", proof.UniverseRpcCourierType, typedProofCourier.ListenAddr, diff --git a/itest/test_harness.go b/itest/test_harness.go index 993fbafa5..64cd737bd 100644 --- a/itest/test_harness.go +++ b/itest/test_harness.go @@ -95,7 +95,7 @@ type harnessTest struct { // nil if not yet set up. lndHarness *lntest.HarnessTest - universeServer *serverHarness + universeServer *universeServerHarness tapd *tapdHarness @@ -107,7 +107,7 @@ type harnessTest struct { // newHarnessTest creates a new instance of a harnessTest from a regular // testing.T instance. func (h *harnessTest) newHarnessTest(t *testing.T, net *lntest.HarnessTest, - universeServer *serverHarness, tapd *tapdHarness, + universeServer *universeServerHarness, tapd *tapdHarness, proofCourier proof.CourierHarness) *harnessTest { return &harnessTest{ @@ -174,7 +174,11 @@ func (h *harnessTest) LogfTimestamped(format string, args ...interface{}) { // shutdown stops both the mock universe and tapd server. 
func (h *harnessTest) shutdown(_ *testing.T) error { - h.universeServer.stop() + err := h.universeServer.Stop() + if err != nil { + return fmt.Errorf("unable to stop universe server harness: "+ + "%w", err) + } if h.proofCourier != nil { err := h.proofCourier.Stop() @@ -184,7 +188,7 @@ func (h *harnessTest) shutdown(_ *testing.T) error { } } - err := h.tapd.stop(!*noDelete) + err = h.tapd.stop(!*noDelete) if err != nil { return fmt.Errorf("unable to stop tapd: %v", err) } @@ -269,7 +273,15 @@ func nextAvailablePort() int { func setupHarnesses(t *testing.T, ht *harnessTest, lndHarness *lntest.HarnessTest, proofCourierType proof.CourierType) (*tapdHarness, - *serverHarness, proof.CourierHarness) { + *universeServerHarness, proof.CourierHarness) { + + universeServer := newUniverseServerHarness(t, ht, lndHarness.Bob) + + t.Logf("Starting universe server harness, listening on %v", + universeServer.ListenAddr) + + err := universeServer.Start(nil) + require.NoError(t, err, "universe server harness") // If a proof courier type is specified, start test specific proof // courier service and attach to test harness. @@ -277,28 +289,18 @@ func setupHarnesses(t *testing.T, ht *harnessTest, switch proofCourierType { case proof.HashmailCourierType: port := nextAvailablePort() - apHarness := NewApertureHarness(ht.t, port) - proofCourier = &apHarness + apertureHarness := NewApertureHarness(ht.t, port) + err := apertureHarness.Start(nil) + require.NoError(t, err, "aperture proof courier harness") + + proofCourier = apertureHarness // If nothing is specified, we use the universe RPC proof courier by // default. default: - proofCourier = NewUniverseRPCHarness(t, ht, lndHarness.Bob) + proofCourier = universeServer } - // Start the proof courier harness if specified. - if proofCourier != nil { - err := proofCourier.Start(nil) - require.NoError(t, err, "unable to start proof courier harness") - } - - mockServerAddr := fmt.Sprintf( - node.ListenerFormat, node.NextAvailablePort(), - ) - universeServer := newServerHarness(mockServerAddr) - err := universeServer.start() - require.NoError(t, err) - // Create a tapd that uses Bob and connect it to the universe server. tapdHarness := setupTapdHarness( t, ht, lndHarness.Alice, universeServer, @@ -351,7 +353,7 @@ type Option func(*tapdHarnessParams) // setupTapdHarness creates a new tapd that connects to the given lnd node // and to the given universe server. func setupTapdHarness(t *testing.T, ht *harnessTest, - node *node.HarnessNode, universe *serverHarness, + node *node.HarnessNode, universe *universeServerHarness, opts ...Option) *tapdHarness { // Set parameters by executing option functions. @@ -379,12 +381,10 @@ func setupTapdHarness(t *testing.T, ht *harnessTest, ho.addrAssetSyncerDisable = params.addrAssetSyncerDisable } - tapdHarness, err := newTapdHarness( - t, ht, tapdConfig{ - NetParams: harnessNetParams, - LndNode: node, - }, harnessOpts, - ) + tapdHarness, err := newTapdHarness(t, ht, tapdConfig{ + NetParams: harnessNetParams, + LndNode: node, + }, harnessOpts) require.NoError(t, err) // Start the tapd harness now. 
diff --git a/itest/universe_server_harness.go b/itest/universe_server_harness.go new file mode 100644 index 000000000..b461d74da --- /dev/null +++ b/itest/universe_server_harness.go @@ -0,0 +1,48 @@ +package itest + +import ( + "testing" + + "github.com/lightninglabs/taproot-assets/proof" + "github.com/lightningnetwork/lnd/lntest/node" + "github.com/stretchr/testify/require" +) + +type universeServerHarness struct { + // service is the instance of the universe tap service. + service *tapdHarness + + // ListenAddr is the address that the service is listening on. + ListenAddr string +} + +func newUniverseServerHarness(t *testing.T, ht *harnessTest, + lndHarness *node.HarnessNode) *universeServerHarness { + + service, err := newTapdHarness(t, ht, tapdConfig{ + NetParams: harnessNetParams, + LndNode: lndHarness, + }) + require.NoError(t, err) + + return &universeServerHarness{ + service: service, + ListenAddr: service.rpcHost(), + } +} + +// Start starts the service. +func (h *universeServerHarness) Start(_ chan error) error { + return h.service.start(false) +} + +// Stop stops the service. +func (h *universeServerHarness) Stop() error { + // Don't delete temporary data on stop. This will allow us to cleanly + // restart the service mid-test. + return h.service.stop(false) +} + +// Ensure that universeServerHarness implements the proof.CourierHarness +// interface. +var _ proof.CourierHarness = (*universeServerHarness)(nil) diff --git a/itest/universerpc_harness.go b/itest/universerpc_harness.go deleted file mode 100644 index 4f0073fc2..000000000 --- a/itest/universerpc_harness.go +++ /dev/null @@ -1,53 +0,0 @@ -package itest - -import ( - "testing" - - "github.com/lightninglabs/taproot-assets/proof" - "github.com/lightningnetwork/lnd/lntest/node" - "github.com/stretchr/testify/require" -) - -// UniverseRPCHarness is an integration testing harness for the universe tap -// service. -type UniverseRPCHarness struct { - // service is the instance of the universe tap service. - service *tapdHarness - - // ListenAddr is the address that the service is listening on. - ListenAddr string -} - -// NewUniverseRPCHarness creates a new test harness for a universe tap service. -func NewUniverseRPCHarness(t *testing.T, ht *harnessTest, - lndHarness *node.HarnessNode) *UniverseRPCHarness { - - service, err := newTapdHarness( - t, ht, tapdConfig{ - NetParams: harnessNetParams, - LndNode: lndHarness, - }, - ) - require.NoError(t, err) - - return &UniverseRPCHarness{ - service: service, - ListenAddr: service.rpcHost(), - } -} - -// Start starts the service. -func (h *UniverseRPCHarness) Start(_ chan error) error { - return h.service.start(false) -} - -// Stop stops the service. -func (h *UniverseRPCHarness) Stop() error { - // Don't delete temporary data on stop. This will allow us to cleanly - // restart the service mid-test. - return h.service.stop(false) -} - -// Ensure that NewUniverseRPCHarness implements the proof.CourierHarness -// interface. -var _ proof.CourierHarness = (*UniverseRPCHarness)(nil) From 2f5abf33606f77422acb456c0841f5331f9bec27 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Thu, 30 Nov 2023 18:17:13 -0600 Subject: [PATCH 12/54] itest: remove manual proof transfer for addr sends With this commit we remove all instances of the manual proof transfer from call sites that do an actual non-interactive (address based) transfer, for which a proof courier should now handle the proof transmission. 
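Concretely, each address-based call site below changes along these lines. The
helpers (sendProof, AssertNonInteractiveRecvComplete) are the itest functions
visible in the diffs; the wrapper function here is hypothetical glue for
illustration:

	func exampleAddrReceive(t *harnessTest, bob *tapdHarness, numRecv int) {
		// Previously, every address send was followed by a manual
		// proof transfer from sender to receiver:
		//
		//     sendProof(t, alice, bob, bobAddr.ScriptKey, genInfo)
		//
		// Now the configured proof courier delivers the proof, so the
		// test only waits for the receiver to finalize it:
		AssertNonInteractiveRecvComplete(t.t, bob, numRecv)
	}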
--- itest/addrs_test.go | 17 ----------------- itest/full_value_split_test.go | 8 ++++---- itest/multi_asset_group_test.go | 8 -------- itest/psbt_test.go | 22 ++++++++++++++++++++-- itest/re-issuance_test.go | 26 +++++--------------------- itest/re-org_test.go | 6 ------ itest/round_trip_send_test.go | 8 ++------ itest/send_test.go | 6 ------ 8 files changed, 31 insertions(+), 70 deletions(-) diff --git a/itest/addrs_test.go b/itest/addrs_test.go index b04f556aa..48a23ff9d 100644 --- a/itest/addrs_test.go +++ b/itest/addrs_test.go @@ -79,12 +79,6 @@ func testAddresses(t *harnessTest) { // Eventually the event should be marked as confirmed. AssertAddrEvent(t.t, secondTapd, addr, 1, statusConfirmed) - // To complete the transfer, we'll export the proof from the - // sender and import it into the receiver for each asset set. - sendProof( - t, t.tapd, secondTapd, addr.ScriptKey, a.AssetGenesis, - ) - // Make sure we have imported and finalized all proofs. AssertNonInteractiveRecvComplete(t.t, secondTapd, idx+1) @@ -436,13 +430,11 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, // In order to force a split, we don't try to send the full asset. const sendAmt = 100 - var bobAddresses []*taprpc.Addr bobAddr1, err := bob.NewAddr(ctxt, &taprpc.NewAddrRequest{ AssetId: genInfo.AssetId, Amt: sendAmt, }) require.NoError(t.t, err) - bobAddresses = append(bobAddresses, bobAddr1) AssertAddrCreated(t.t, bob, mintedAsset, bobAddr1) bobAddr2, err := bob.NewAddr(ctxt, &taprpc.NewAddrRequest{ @@ -450,7 +442,6 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, Amt: sendAmt, }) require.NoError(t.t, err) - bobAddresses = append(bobAddresses, bobAddr2) AssertAddrCreated(t.t, bob, mintedAsset, bobAddr2) // To test that Alice can also receive to multiple addresses in a single @@ -492,14 +483,6 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, // this point, so the status should go to completed directly. AssertAddrEventByStatus(t.t, alice, statusCompleted, numRuns*2) - // To complete the transfer, we'll export the proof from the sender and - // import it into the receiver for each asset set. This should not be - // necessary for the sends to Alice, as she is both the sender and - // receiver and should detect the local proof once it's written to disk. - for i := range bobAddresses { - sendProof(t, alice, bob, bobAddresses[i].ScriptKey, genInfo) - } - // Make sure we have imported and finalized all proofs. 
AssertNonInteractiveRecvComplete(t.t, bob, numRuns*2) AssertNonInteractiveRecvComplete(t.t, alice, numRuns*2) diff --git a/itest/full_value_split_test.go b/itest/full_value_split_test.go index 75854b0ca..c60a53ff6 100644 --- a/itest/full_value_split_test.go +++ b/itest/full_value_split_test.go @@ -88,8 +88,8 @@ func runFullValueSendTests(ctxt context.Context, t *harnessTest, alice, []uint64{0, fullAmount}, senderTransferIdx, senderTransferIdx+1, ) - _ = sendProof( - t, alice, bob, receiverAddr.ScriptKey, genInfo, + AssertNonInteractiveRecvComplete( + t.t, bob, senderTransferIdx+1, ) senderTransferIdx++ } else { @@ -108,8 +108,8 @@ func runFullValueSendTests(ctxt context.Context, t *harnessTest, alice, genInfo.AssetId, []uint64{0, fullAmount}, receiverTransferIdx, receiverTransferIdx+1, ) - _ = sendProof( - t, bob, alice, receiverAddr.ScriptKey, genInfo, + AssertNonInteractiveRecvComplete( + t.t, alice, receiverTransferIdx+1, ) receiverTransferIdx++ } diff --git a/itest/multi_asset_group_test.go b/itest/multi_asset_group_test.go index 9b4add64d..82d9706a0 100644 --- a/itest/multi_asset_group_test.go +++ b/itest/multi_asset_group_test.go @@ -129,10 +129,6 @@ func testMintMultiAssetGroups(t *harnessTest) { normalMember.AssetGenesis.AssetId, []uint64{0, normalMember.Amount}, 0, 1, ) - _ = sendProof( - t, t.tapd, secondTapd, bobNormalAddr.ScriptKey, - normalMemberGenInfo, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) AssertBalanceByGroup( @@ -170,10 +166,6 @@ func testMintMultiAssetGroups(t *harnessTest) { collectMember.AssetGenesis.AssetId, []uint64{0, collectMember.Amount}, 1, 2, ) - sendProof( - t, t.tapd, secondTapd, bobCollectAddr.ScriptKey, - collectMemberGenInfo, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 2) AssertBalanceByGroup( diff --git a/itest/psbt_test.go b/itest/psbt_test.go index d587f2097..c0711ce4e 100644 --- a/itest/psbt_test.go +++ b/itest/psbt_test.go @@ -132,6 +132,10 @@ func testPsbtScriptHashLockSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, bob, sendResp, genInfo.AssetId, []uint64{numUnits / 2, numUnits / 2}, 0, 1, ) + + // This is an interactive/PSBT based transfer, so we do need to manually + // send the proof from the sender to the receiver because the proof + // courier address gets lost in the address->PSBT conversion. _ = sendProof(t, bob, alice, aliceAddr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, alice, 1) @@ -258,6 +262,10 @@ func testPsbtScriptCheckSigSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, bob, sendResp, genInfo.AssetId, []uint64{numUnits / 2, numUnits / 2}, 0, 1, ) + + // This is an interactive/PSBT based transfer, so we do need to manually + // send the proof from the sender to the receiver because the proof + // courier address gets lost in the address->PSBT conversion. _ = sendProof(t, bob, alice, aliceAddr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, alice, 1) @@ -438,6 +446,9 @@ func runPsbtInteractiveFullValueSendTest(ctxt context.Context, t *harnessTest, sendResp, genInfo.AssetId, amounts, i/2, (i/2)+1, numOutputs, ) + + // This is an interactive transfer, so we do need to manually + // send the proof from the sender to the receiver. 
_ = sendProof( t, sender, receiver, receiverScriptKey.PubKey.SerializeCompressed(), genInfo, @@ -656,6 +667,9 @@ func runPsbtInteractiveSplitSendTest(ctxt context.Context, t *harnessTest, []uint64{sendAmt, changeAmt}, i/2, (i/2)+1, numOutputs, ) + + // This is an interactive transfer, so we do need to manually + // send the proof from the sender to the receiver. _ = sendProof( t, sender, receiver, receiverScriptKey.PubKey.SerializeCompressed(), genInfo, @@ -779,6 +793,9 @@ func testPsbtInteractiveTapscriptSibling(t *harnessTest) { t.t, t.lndHarness.Miner.Client, alice, sendResp, genInfo.AssetId, []uint64{sendAmt, changeAmt}, 0, 1, 2, ) + + // This is an interactive transfer, so we do need to manually send the + // proof from the sender to the receiver. _ = sendProof( t, alice, bob, receiverScriptKey.PubKey.SerializeCompressed(), genInfo, @@ -927,6 +944,9 @@ func testPsbtMultiSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, sender, sendResp, genInfo.AssetId, outputAmounts, 0, 1, numOutputs, ) + + // This is an interactive transfer, so we do need to manually send the + // proof from the sender to the receiver. _ = sendProof( t, sender, receiver, receiverScriptKey1.PubKey.SerializeCompressed(), genInfo, @@ -1034,7 +1054,6 @@ func sendToTapscriptAddr(ctx context.Context, t *harnessTest, alice, t.t, t.lndHarness.Miner.Client, alice, sendResp, genInfo.AssetId, []uint64{changeUnits, numUnits}, 0, 1, ) - _ = sendProof(t, alice, bob, bobAddr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, bob, 1) } @@ -1059,7 +1078,6 @@ func sendAssetAndAssert(ctx context.Context, t *harnessTest, alice, genInfo.AssetId, []uint64{change, numUnits}, outTransferIdx, numOutTransfers, ) - _ = sendProof(t, alice, bob, bobAddr.ScriptKey, genInfo) // There are now two receive events (since only non-interactive sends // appear in that RPC output). diff --git a/itest/re-issuance_test.go b/itest/re-issuance_test.go index b92d27b85..6c140dab2 100644 --- a/itest/re-issuance_test.go +++ b/itest/re-issuance_test.go @@ -78,10 +78,7 @@ func testReIssuance(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, firstCollectSend, collectGenInfo.AssetId, []uint64{0, 1}, 0, 1, ) - sendProof( - t, t.tapd, secondTapd, collectGroupAddr.ScriptKey, - collectGenInfo, - ) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) // Check the state of both nodes. The first node should show one // zero-value transfer representing the send of the collectible. @@ -107,10 +104,7 @@ func testReIssuance(t *harnessTest) { normalGenInfo.AssetId, []uint64{normalGroupMintHalf, normalGroupMintHalf}, 1, 2, ) - sendProof( - t, t.tapd, secondTapd, normalGroupAddr.ScriptKey, - normalGenInfo, - ) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 2) // Reissue one more collectible and half the original mint amount for // the normal asset. @@ -186,10 +180,7 @@ func testReIssuance(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, secondCollectSend, collectReissueInfo.AssetId, []uint64{0, 1}, 2, 3, ) - sendProof( - t, t.tapd, secondTapd, collectReissueAddr.ScriptKey, - collectReissueInfo, - ) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 3) // The second node should show two groups, with two assets in // the collectible group and a total balance of 2 for that group. 
@@ -220,10 +211,7 @@ func testReIssuance(t *harnessTest) { t.t, secondTapd.ht.lndHarness.Miner.Client, secondTapd, thirdCollectSend, collectGenInfo.AssetId, []uint64{0, 1}, 0, 1, ) - sendProof( - t, secondTapd, t.tapd, collectReissueAddr.ScriptKey, - collectReissueInfo, - ) + AssertNonInteractiveRecvComplete(t.t, t.tapd, 1) // The collectible balance on the minting node should be 1, and there // should still be only two groups. @@ -382,12 +370,8 @@ func testMintWithGroupKeyErrors(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, collectSend, collectGenInfo.AssetId, []uint64{0, 1}, 0, 1, ) - sendProof( - t, t.tapd, secondTapd, collectGroupAddr.ScriptKey, - collectGenInfo, - ) - // A reissuance with the second node should still fail because the + // A re-issuance with the second node should still fail because the // group key was not created by that node. _, err = secondTapd.MintAsset(ctxb, reissueRequest) require.ErrorContains(t.t, err, "can't sign with group key") diff --git a/itest/re-org_test.go b/itest/re-org_test.go index b77762f5a..6f3524b08 100644 --- a/itest/re-org_test.go +++ b/itest/re-org_test.go @@ -170,9 +170,6 @@ func testReOrgSend(t *harnessTest) { sendAssetGen.AssetId, []uint64{sendAsset.Amount - sendAmount, sendAmount}, 0, 1, ) - _ = sendProof( - t, t.tapd, secondTapd, bobAddr.ScriptKey, sendAssetGen, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) initialBlockHash := initialBlock.BlockHash() @@ -291,9 +288,6 @@ func testReOrgMintAndSend(t *harnessTest) { sendAssetGen.AssetId, []uint64{sendAsset.Amount - sendAmount, sendAmount}, 0, 1, ) - _ = sendProof( - t, t.tapd, secondTapd, bobAddr.ScriptKey, sendAssetGen, - ) AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) initialBlockHash := initialBlock.BlockHash() diff --git a/itest/round_trip_send_test.go b/itest/round_trip_send_test.go index 784ca1571..87bdb5ca6 100644 --- a/itest/round_trip_send_test.go +++ b/itest/round_trip_send_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/hex" - "time" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" @@ -81,7 +80,7 @@ func testRoundTripSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, t.tapd, sendResp, genInfo.AssetId, []uint64{bobAmt, bobAmt}, 0, 1, ) - _ = sendProof(t, t.tapd, secondTapd, bobAddr.ScriptKey, genInfo) + AssertNonInteractiveRecvComplete(t.t, secondTapd, 1) // Now, Alice will request half of the assets she sent to Bob. aliceAddr, err := t.tapd.NewAddr(ctxb, &taprpc.NewAddrRequest{ @@ -101,10 +100,7 @@ func testRoundTripSend(t *harnessTest) { t.t, t.lndHarness.Miner.Client, secondTapd, sendResp, genInfo.AssetId, []uint64{aliceAmt, aliceAmt}, 0, 1, ) - _ = sendProof(t, secondTapd, t.tapd, aliceAddr.ScriptKey, genInfo) - - // Give both nodes some time to process the final transfer. - time.Sleep(time.Second * 1) + AssertNonInteractiveRecvComplete(t.t, t.tapd, 1) // Check the final state of both nodes. Each node should list // one transfer, and Alice should have 3/4 of the total units. diff --git a/itest/send_test.go b/itest/send_test.go index 2a44335e1..a57301dc6 100644 --- a/itest/send_test.go +++ b/itest/send_test.go @@ -1240,7 +1240,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { genInfo.AssetId, []uint64{4000, 1000}, 0, 1, ) - _ = sendProof(t, t.tapd, bobTapd, addr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, bobTapd, 1) // Second of two send events from minting node to the secondary node. 
@@ -1261,7 +1260,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { genInfo.AssetId, []uint64{0, 4000}, 1, 2, ) - _ = sendProof(t, t.tapd, bobTapd, addr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, bobTapd, 2) t.Logf("Two separate send events complete, now attempting to send " + @@ -1285,7 +1283,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { genInfo.AssetId, []uint64{0, 5000}, 0, 1, ) - _ = sendProof(t, bobTapd, t.tapd, addr.ScriptKey, genInfo) AssertNonInteractiveRecvComplete(t.t, t.tapd, 1) } @@ -1388,9 +1385,6 @@ func testSendMultipleCoins(t *harnessTest) { // Now we confirm the 5 transfers and make sure they complete as // expected. _ = MineBlocks(t.t, t.lndHarness.Miner.Client, 1, 5) - for _, addr := range bobAddrs { - _ = sendProof(t, t.tapd, secondTapd, addr.ScriptKey, genInfo) - } AssertNonInteractiveRecvComplete(t.t, secondTapd, 5) } From 71c537f2d88fe64dd2203c731a53a58334deb31c Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Fri, 1 Dec 2023 18:05:43 -0500 Subject: [PATCH 13/54] itest: add main universe server as federation member By default we want all new tapd instances to be synced with the main universe server, so we add it as a federation member unless specifically disabled. --- itest/addrs_test.go | 10 ++++++++-- itest/mint_batch_stress_test.go | 2 +- itest/test_harness.go | 29 +++++++++++++++++++++++++++++ itest/universe_pagination_test.go | 2 +- itest/universe_test.go | 10 ++++++++-- 5 files changed, 47 insertions(+), 6 deletions(-) diff --git a/itest/addrs_test.go b/itest/addrs_test.go index 48a23ff9d..93a0eda4b 100644 --- a/itest/addrs_test.go +++ b/itest/addrs_test.go @@ -189,7 +189,12 @@ func testMultiAddress(t *harnessTest) { func testAddressAssetSyncer(t *harnessTest) { // We'll kick off the test by making a new node, without hooking it up // to any existing Universe server. - bob := setupTapdHarness(t.t, t, t.lndHarness.Bob, nil) + bob := setupTapdHarness( + t.t, t, t.lndHarness.Bob, t.universeServer, + func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true + }, + ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) }() @@ -315,8 +320,9 @@ func testAddressAssetSyncer(t *harnessTest) { restartBobNoUniSync := func(disableSyncer bool) { require.NoError(t.t, bob.stop(!*noDelete)) bob = setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, func(params *tapdHarnessParams) { + params.noDefaultUniverseSync = true params.addrAssetSyncerDisable = disableSyncer }, ) diff --git a/itest/mint_batch_stress_test.go b/itest/mint_batch_stress_test.go index b4103ebc1..124ae44ea 100644 --- a/itest/mint_batch_stress_test.go +++ b/itest/mint_batch_stress_test.go @@ -57,7 +57,7 @@ func testMintBatchNStressTest(t *harnessTest, batchSize int, // If we create a second tapd instance and sync the universe state, // the synced tree should match the source tree. bob := setupTapdHarness( - t.t, t, t.lndHarness.Bob, nil, + t.t, t, t.lndHarness.Bob, t.universeServer, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) diff --git a/itest/test_harness.go b/itest/test_harness.go index 64cd737bd..1550f3a7b 100644 --- a/itest/test_harness.go +++ b/itest/test_harness.go @@ -240,6 +240,25 @@ func (h *harnessTest) syncUniverseState(target, syncer *tapdHarness, require.Equal(h.t, numExpectedAssets, numAssets) } +// addFederationServer adds a new federation server to the given tapd harness. 
+func (h *harnessTest) addFederationServer(host string, target *tapdHarness) {
+	ctxt, cancel := context.WithTimeout(
+		context.Background(), defaultWaitTimeout,
+	)
+	defer cancel()
+
+	_, err := target.AddFederationServer(
+		ctxt, &unirpc.AddFederationServerRequest{
+			Servers: []*unirpc.UniverseFederationServer{
+				{
+					Host: host,
+				},
+			},
+		},
+	)
+	require.NoError(h.t, err)
+}
+
 // nextAvailablePort returns the first port that is available for listening by
 // a new node. It panics if no port is found and the maximum available TCP port
 // is reached.
@@ -346,6 +365,10 @@ type tapdHarnessParams struct {
 	// startupSyncNumAssets is the number of assets that are expected to be
 	// synced from the above node.
 	startupSyncNumAssets int
+
+	// noDefaultUniverseSync indicates that the default universe server
+	// should not be added as a federation server to this tapd instance.
+	noDefaultUniverseSync bool
 }
 
 type Option func(*tapdHarnessParams)
@@ -391,6 +414,12 @@ func setupTapdHarness(t *testing.T, ht *harnessTest,
 	err = tapdHarness.start(params.expectErrExit)
 	require.NoError(t, err)
 
+	// Add the default universe server as a federation server, unless
+	// specifically indicated by the caller.
+	if !params.noDefaultUniverseSync {
+		ht.addFederationServer(universe.service.rpcHost(), tapdHarness)
+	}
+
 	// Before we exit, we'll check to see if we need to sync the universe
 	// state.
 	if params.startupSyncNode != nil {
diff --git a/itest/universe_pagination_test.go b/itest/universe_pagination_test.go
index 2a639f938..9ef4c1065 100644
--- a/itest/universe_pagination_test.go
+++ b/itest/universe_pagination_test.go
@@ -36,7 +36,7 @@ func testUniversePaginationSimple(t *harnessTest) {
 	// If we create a second tapd instance and sync the universe state,
 	// the synced tree should match the source tree.
 	bob := setupTapdHarness(
-		t.t, t, t.lndHarness.Bob, nil,
+		t.t, t, t.lndHarness.Bob, t.universeServer,
 	)
 	defer func() {
 		require.NoError(t.t, bob.stop(!*noDelete))
diff --git a/itest/universe_test.go b/itest/universe_test.go
index 57597e273..5f9fe5b19 100644
--- a/itest/universe_test.go
+++ b/itest/universe_test.go
@@ -42,7 +42,10 @@ func testUniverseSync(t *harnessTest) {
 	// With those assets created, we'll now create a new node that we'll
 	// use to exercise the Universe sync.
 	bob := setupTapdHarness(
-		t.t, t, t.lndHarness.Bob, nil,
+		t.t, t, t.lndHarness.Bob, t.universeServer,
+		func(params *tapdHarnessParams) {
+			params.noDefaultUniverseSync = true
+		},
 	)
 	defer func() {
 		require.NoError(t.t, bob.stop(!*noDelete))
@@ -385,7 +388,10 @@ func testUniverseFederation(t *harnessTest) {
 	// We'll kick off the test by making a new node, without hooking it up to
 	// any existing Universe server.
 	bob := setupTapdHarness(
-		t.t, t, t.lndHarness.Bob, nil,
+		t.t, t, t.lndHarness.Bob, t.universeServer,
+		func(params *tapdHarnessParams) {
+			params.noDefaultUniverseSync = true
+		},
 	)
 	defer func() {
 		require.NoError(t.t, bob.stop(!*noDelete))
From c45ac7202c287341f497ffff78b3fb461c974e19 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Fri, 1 Dec 2023 18:07:57 -0500
Subject: [PATCH 14/54] itest: extract and tune proof courier backoff config

With this commit we optimize the backoff config to enable much faster
proof transmission during integration tests. A significant speedup also
comes from reducing the time the custodian waits between detecting a
transfer on-chain and attempting to fetch the proof with the courier.
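For reference, the tuned defaults replace the previous 2s/3-try config
extracted below. A test that needs the old, slower cadence could still
override them through the harness options (a sketch under the assumption
that tapdHarnessParams exposes the proofSendBackoffCfg field that
newTapdHarness consumes via its opts):

    // Hypothetical override: fall back to the previous, slower backoff.
    slowBackoff := proof.BackoffCfg{
        BackoffResetWait: 2 * time.Second,
        NumTries:         3,
        InitialBackoff:   2 * time.Second,
        MaxBackoff:       2 * time.Second,
    }
    bobTapd := setupTapdHarness(
        t.t, t, t.lndHarness.Bob, t.universeServer,
        func(params *tapdHarnessParams) {
            // Assumed params field, read by newTapdHarness as
            // opts.proofSendBackoffCfg.
            params.proofSendBackoffCfg = &slowBackoff
        },
    )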
---
 itest/tapd_harness.go | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/itest/tapd_harness.go b/itest/tapd_harness.go
index 398415650..fe81f4ef2 100644
--- a/itest/tapd_harness.go
+++ b/itest/tapd_harness.go
@@ -46,6 +46,20 @@ var (
 		tapdb.DefaultPostgresFixtureLifetime, "The amount of time to "+
 			"allow the postgres fixture to run in total. Needs "+
 			"to be increased for long-running tests.")
+
+	// defaultBackoffConfig is the default backoff config we'll use for
+	// sending proofs.
+	defaultBackoffConfig = proof.BackoffCfg{
+		BackoffResetWait: time.Second,
+		NumTries:         5,
+		InitialBackoff:   300 * time.Millisecond,
+		MaxBackoff:       600 * time.Millisecond,
+	}
+
+	// defaultProofRetrievalDelay is the default delay we'll use for the
+	// custodian to wait between observing a transaction on-chain and
+	// retrieving the proof from the courier.
+	defaultProofRetrievalDelay = 200 * time.Millisecond
 )
 
 const (
@@ -182,14 +196,9 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig,
 	// Populate proof courier specific config fields.
 	//
 	// Use passed in backoff config or default config.
-	backoffCfg := &proof.BackoffCfg{
-		BackoffResetWait: 2 * time.Second,
-		NumTries:         3,
-		InitialBackoff:   2 * time.Second,
-		MaxBackoff:       2 * time.Second,
-	}
+	backoffCfg := defaultBackoffConfig
 	if opts.proofSendBackoffCfg != nil {
-		backoffCfg = opts.proofSendBackoffCfg
+		backoffCfg = *opts.proofSendBackoffCfg
 	}
 
 	// Used passed in proof receiver ack timeout or default.
@@ -203,7 +212,7 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig,
 	// config from the hashmail courier config.
 	finalCfg.HashMailCourier = &proof.HashMailCourierCfg{
 		ReceiverAckTimeout: receiverAckTimeout,
-		BackoffCfg:         backoffCfg,
+		BackoffCfg:         &backoffCfg,
 	}
 
 	switch typedProofCourier := (opts.proofCourier).(type) {
@@ -224,7 +233,11 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig,
 		finalCfg.HashMailCourier = nil
 	}
 
+	ht.t.Logf("Using proof courier address: %v",
+		finalCfg.DefaultProofCourierAddr)
+
 	// Set the custodian proof retrieval delay if it was specified.
+	finalCfg.CustodianProofRetrievalDelay = defaultProofRetrievalDelay
 	if opts.custodianProofRetrievalDelay != nil {
 		finalCfg.CustodianProofRetrievalDelay = *opts.custodianProofRetrievalDelay
 	}
From b072f302305341bb8ddad5d298b27edd95973547 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Fri, 1 Dec 2023 18:35:27 -0500
Subject: [PATCH 15/54] itest: don't sync test nodes to each other

Since we now have a main universe server by default, we can stop test
nodes from syncing directly to each other.
---
 itest/addrs_test.go             |  8 --------
 itest/collectible_split_test.go |  4 ----
 itest/full_value_split_test.go  |  4 ----
 itest/multi_asset_group_test.go |  8 --------
 itest/psbt_test.go              | 32 --------------------------------
 itest/re-issuance_test.go       |  9 ---------
 itest/re-org_test.go            | 12 ------------
 itest/round_trip_send_test.go   |  4 ----
 itest/send_test.go              | 18 ------------------
 9 files changed, 99 deletions(-)

diff --git a/itest/addrs_test.go b/itest/addrs_test.go
index 93a0eda4b..657cafc64 100644
--- a/itest/addrs_test.go
+++ b/itest/addrs_test.go
@@ -40,10 +40,6 @@ func testAddresses(t *harnessTest) {
 	// assets made above.
secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -169,10 +165,6 @@ func testMultiAddress(t *harnessTest) { alice := t.tapd bob := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = alice - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, bob.stop(!*noDelete)) diff --git a/itest/collectible_split_test.go b/itest/collectible_split_test.go index f8fd17035..2f987e8e7 100644 --- a/itest/collectible_split_test.go +++ b/itest/collectible_split_test.go @@ -53,10 +53,6 @@ func testCollectibleSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/full_value_split_test.go b/itest/full_value_split_test.go index c60a53ff6..84afb2430 100644 --- a/itest/full_value_split_test.go +++ b/itest/full_value_split_test.go @@ -33,10 +33,6 @@ func testFullValueSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/multi_asset_group_test.go b/itest/multi_asset_group_test.go index 82d9706a0..3dae13188 100644 --- a/itest/multi_asset_group_test.go +++ b/itest/multi_asset_group_test.go @@ -97,10 +97,6 @@ func testMintMultiAssetGroups(t *harnessTest) { // ensure that they can be sent and received correctly. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = 4 - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -325,10 +321,6 @@ func testMultiAssetGroupSend(t *harnessTest) { // assets made above. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = groupCount - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/psbt_test.go b/itest/psbt_test.go index c0711ce4e..245ac0566 100644 --- a/itest/psbt_test.go +++ b/itest/psbt_test.go @@ -44,10 +44,6 @@ func testPsbtScriptHashLockSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -170,10 +166,6 @@ func testPsbtScriptCheckSigSend(t *harnessTest) { // serve as the node which'll receive the assets. 
secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -315,10 +307,6 @@ func testPsbtNormalInteractiveFullValueSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -366,10 +354,6 @@ func testPsbtGroupedInteractiveFullValueSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -526,10 +510,6 @@ func testPsbtNormalInteractiveSplitSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -577,10 +557,6 @@ func testPsbtGroupedInteractiveSplitSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -735,10 +711,6 @@ func testPsbtInteractiveTapscriptSibling(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(p *tapdHarnessParams) { - p.startupSyncNode = t.tapd - p.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -863,10 +835,6 @@ func testPsbtMultiSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/re-issuance_test.go b/itest/re-issuance_test.go index 6c140dab2..a043fb1df 100644 --- a/itest/re-issuance_test.go +++ b/itest/re-issuance_test.go @@ -51,13 +51,8 @@ func testReIssuance(t *harnessTest) { // Create a second node, which will have no information about previously // minted assets or asset groups. - numTotalAssets := len(normalGroupGen) + len(collectGroupGen) secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = numTotalAssets - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -341,10 +336,6 @@ func testMintWithGroupKeyErrors(t *harnessTest) { // minted assets or asset groups. 
secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(collectGroupGen) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/re-org_test.go b/itest/re-org_test.go index 6f3524b08..21e29a79e 100644 --- a/itest/re-org_test.go +++ b/itest/re-org_test.go @@ -53,10 +53,6 @@ func testReOrgMint(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(assetList) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -139,10 +135,6 @@ func testReOrgSend(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(assetList) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -262,10 +254,6 @@ func testReOrgMintAndSend(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(assetList) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/round_trip_send_test.go b/itest/round_trip_send_test.go index 87bdb5ca6..d312eed21 100644 --- a/itest/round_trip_send_test.go +++ b/itest/round_trip_send_test.go @@ -41,10 +41,6 @@ func testRoundTripSend(t *harnessTest) { // serve as the node which'll receive the assets. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) diff --git a/itest/send_test.go b/itest/send_test.go index a57301dc6..c7ab48f26 100644 --- a/itest/send_test.go +++ b/itest/send_test.go @@ -88,10 +88,6 @@ func testBasicSendUnidirectional(t *harnessTest) { // node will be used to synchronize universe state. secondTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, secondTapd.stop(!*noDelete)) @@ -215,8 +211,6 @@ func testRestartReceiverCheckBalance(t *harnessTest) { recvTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) params.custodianProofRetrievalDelay = &custodianProofRetrievalDelay }, ) @@ -458,10 +452,6 @@ func testBasicSendPassiveAsset(t *harnessTest) { // Set up a new node that will serve as the receiving node. recvTapd := setupTapdHarness( t.t, t, t.lndHarness.Bob, t.universeServer, - func(params *tapdHarnessParams) { - params.startupSyncNode = t.tapd - params.startupSyncNumAssets = len(rpcAssets) - }, ) defer func() { require.NoError(t.t, recvTapd.stop(!*noDelete)) @@ -1212,10 +1202,6 @@ func testMultiInputSendNonInteractiveSingleID(t *harnessTest) { // node. Sync the new node with the primary node. 
 	bobTapd := setupTapdHarness(
 		t.t, t, t.lndHarness.Bob, t.universeServer,
-		func(params *tapdHarnessParams) {
-			params.startupSyncNode = t.tapd
-			params.startupSyncNumAssets = len(rpcAssets)
-		},
 	)
 	defer func() {
 		require.NoError(t.t, bobTapd.stop(!*noDelete))
@@ -1305,10 +1291,6 @@ func testSendMultipleCoins(t *harnessTest) {
 	// node will be used to synchronize universe state.
 	secondTapd := setupTapdHarness(
 		t.t, t, t.lndHarness.Bob, t.universeServer,
-		func(params *tapdHarnessParams) {
-			params.startupSyncNode = t.tapd
-			params.startupSyncNumAssets = len(rpcAssets)
-		},
 	)
 	defer func() {
 		require.NoError(t.t, secondTapd.stop(!*noDelete))
From 326c949282ac0eb5c5a228e63db630a59ea93040 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 13 Nov 2023 14:31:00 +0000
Subject: [PATCH 16/54] universe: fix comment spelling

---
 universe/auto_syncer.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/universe/auto_syncer.go b/universe/auto_syncer.go
index 9e88c029f..45dd53589 100644
--- a/universe/auto_syncer.go
+++ b/universe/auto_syncer.go
@@ -292,7 +292,7 @@ func (f *FederationEnvoy) syncer() {
 		// to synchronize state with all the active universe servers in
 		// the federation.
 		case <-syncTicker.C:
-			// Error propogation is handled in tryFetchServers, we
+			// Error propagation is handled in tryFetchServers, we
			// only need to exit here.
 			fedServers, err := f.tryFetchServers()
 			if err != nil {
From 0d8bf8acef173629d0c0c357c06395f65781340a Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 13 Nov 2023 14:20:27 +0000
Subject: [PATCH 17/54] universe: add doc to fields

---
 universe/auto_syncer.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/universe/auto_syncer.go b/universe/auto_syncer.go
index 45dd53589..72530ea23 100644
--- a/universe/auto_syncer.go
+++ b/universe/auto_syncer.go
@@ -71,8 +71,12 @@ type FederationPushReq struct {
 	// Leaf is the new leaf to add.
 	Leaf *Leaf
 
+	// resp is a channel that will be sent the asset issuance/transfer
+	// proof and corresponding universe/multiverse inclusion proofs if the
+	// federation proof push was successful.
 	resp chan *Proof
-	err  chan error
+
+	err chan error
 }
 
 // FederationProofBatchPushReq is used to push out a batch of universe proof
@@ -97,8 +101,12 @@ type FederationEnvoy struct {
 
 	stopOnce sync.Once
 
+	// pushRequests is a channel that will be sent new requests to push out
+	// proof leaves to the federation.
 	pushRequests chan *FederationPushReq
 
+	// batchPushRequests is a channel that will be sent new requests to push
+	// out batch proof leaves to the federation.
 	batchPushRequests chan *FederationProofBatchPushReq
 }
From 72e323393469e852c4503681736e57200af05f4f Mon Sep 17 00:00:00 2001
From: ffranr
Date: Mon, 27 Nov 2023 11:56:17 +0000
Subject: [PATCH 18/54] tapdb: rename ListUniverseServers to QueryUniverseServers

This commit renames `ListUniverseServers` to `QueryUniverseServers`. It
also adds a `WHERE` clause to the SQL statement to allow us to filter on
server host or row ID.
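For example, the new filter lets a caller look up a single federation
server by host (a minimal sketch against the sqlc-generated API below;
the host value is hypothetical, and sqlStr is the tapdb helper that
wraps a string into a sql.NullString):

    // Only rows matching the host are returned. Leaving both filter
    // arguments NULL returns every server, matching the behavior of
    // the old ListUniverseServers query.
    servers, err := db.QueryUniverseServers(ctx, QueryUniServersParams{
        ServerHost: sqlStr("universe.example.com:10029"),
    })
    if err != nil {
        return err
    }
    // servers now holds at most the one matching row.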
--- tapdb/sqlc/querier.go | 2 +- tapdb/sqlc/queries/universe.sql | 7 ++-- tapdb/sqlc/universe.sql.go | 62 +++++++++++++++++++-------------- tapdb/universe_federation.go | 13 +++++-- 4 files changed, 51 insertions(+), 33 deletions(-) diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index ee001f911..1a64f4291 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -94,7 +94,6 @@ type Querier interface { InsertPassiveAsset(ctx context.Context, arg InsertPassiveAssetParams) error InsertRootKey(ctx context.Context, arg InsertRootKeyParams) error InsertUniverseServer(ctx context.Context, arg InsertUniverseServerParams) error - ListUniverseServers(ctx context.Context) ([]UniverseServer, error) LogProofTransferAttempt(ctx context.Context, arg LogProofTransferAttemptParams) error LogServerSync(ctx context.Context, arg LogServerSyncParams) error NewMintingBatch(ctx context.Context, arg NewMintingBatchParams) error @@ -129,6 +128,7 @@ type Querier interface { // root, simplifies queries QueryUniverseAssetStats(ctx context.Context, arg QueryUniverseAssetStatsParams) ([]QueryUniverseAssetStatsRow, error) QueryUniverseLeaves(ctx context.Context, arg QueryUniverseLeavesParams) ([]QueryUniverseLeavesRow, error) + QueryUniverseServers(ctx context.Context, arg QueryUniverseServersParams) ([]UniverseServer, error) QueryUniverseStats(ctx context.Context) (QueryUniverseStatsRow, error) ReAnchorPassiveAssets(ctx context.Context, arg ReAnchorPassiveAssetsParams) error SetAddrManaged(ctx context.Context, arg SetAddrManagedParams) error diff --git a/tapdb/sqlc/queries/universe.sql b/tapdb/sqlc/queries/universe.sql index cab652573..3376cd0bf 100644 --- a/tapdb/sqlc/queries/universe.sql +++ b/tapdb/sqlc/queries/universe.sql @@ -115,8 +115,11 @@ UPDATE universe_servers SET last_sync_time = @new_sync_time WHERE server_host = @target_server; --- name: ListUniverseServers :many -SELECT * FROM universe_servers; +-- name: QueryUniverseServers :many +SELECT * FROM universe_servers +WHERE (id = sqlc.narg('id') OR sqlc.narg('id') IS NULL) AND + (server_host = sqlc.narg('server_host') + OR sqlc.narg('server_host') IS NULL); -- name: InsertNewSyncEvent :exec WITH group_key_root_id AS ( diff --git a/tapdb/sqlc/universe.sql.go b/tapdb/sqlc/universe.sql.go index 53452cab5..a0e7167af 100644 --- a/tapdb/sqlc/universe.sql.go +++ b/tapdb/sqlc/universe.sql.go @@ -265,33 +265,6 @@ func (q *Queries) InsertUniverseServer(ctx context.Context, arg InsertUniverseSe return err } -const listUniverseServers = `-- name: ListUniverseServers :many -SELECT id, server_host, last_sync_time FROM universe_servers -` - -func (q *Queries) ListUniverseServers(ctx context.Context) ([]UniverseServer, error) { - rows, err := q.db.QueryContext(ctx, listUniverseServers) - if err != nil { - return nil, err - } - defer rows.Close() - var items []UniverseServer - for rows.Next() { - var i UniverseServer - if err := rows.Scan(&i.ID, &i.ServerHost, &i.LastSyncTime); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const logServerSync = `-- name: LogServerSync :exec UPDATE universe_servers SET last_sync_time = $1 @@ -688,6 +661,41 @@ func (q *Queries) QueryUniverseLeaves(ctx context.Context, arg QueryUniverseLeav return items, nil } +const queryUniverseServers = `-- name: QueryUniverseServers :many +SELECT id, server_host, last_sync_time FROM universe_servers +WHERE (id = $1 OR $1 IS 
NULL) AND + (server_host = $2 + OR $2 IS NULL) +` + +type QueryUniverseServersParams struct { + ID sql.NullInt64 + ServerHost sql.NullString +} + +func (q *Queries) QueryUniverseServers(ctx context.Context, arg QueryUniverseServersParams) ([]UniverseServer, error) { + rows, err := q.db.QueryContext(ctx, queryUniverseServers, arg.ID, arg.ServerHost) + if err != nil { + return nil, err + } + defer rows.Close() + var items []UniverseServer + for rows.Next() { + var i UniverseServer + if err := rows.Scan(&i.ID, &i.ServerHost, &i.LastSyncTime); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const queryUniverseStats = `-- name: QueryUniverseStats :one WITH stats AS ( SELECT total_asset_syncs, total_asset_proofs diff --git a/tapdb/universe_federation.go b/tapdb/universe_federation.go index 5052ac1a0..c306baf30 100644 --- a/tapdb/universe_federation.go +++ b/tapdb/universe_federation.go @@ -40,6 +40,9 @@ type ( // FedUniSyncConfigs is the universe specific federation sync config // returned from a query. FedUniSyncConfigs = sqlc.FederationUniSyncConfig + + // QueryUniServersParams is used to query for universe servers. + QueryUniServersParams = sqlc.QueryUniverseServersParams ) var ( @@ -97,8 +100,10 @@ type UniverseServerStore interface { // LogServerSync marks that a server was just synced in the DB. LogServerSync(ctx context.Context, arg sqlc.LogServerSyncParams) error - // ListUniverseServers returns the total set of all universe servers. - ListUniverseServers(ctx context.Context) ([]sqlc.UniverseServer, error) + // QueryUniverseServers returns a set of universe servers. + QueryUniverseServers(ctx context.Context, + arg sqlc.QueryUniverseServersParams) ([]sqlc.UniverseServer, + error) } // UniverseFederationOptions is the database tx object for the universe server store. @@ -174,7 +179,9 @@ func (u *UniverseFederationDB) UniverseServers( readTx := NewUniverseFederationReadTx() dbErr := u.db.ExecTx(ctx, &readTx, func(db UniverseServerStore) error { - servers, err := db.ListUniverseServers(ctx) + servers, err := db.QueryUniverseServers( + ctx, QueryUniServersParams{}, + ) if err != nil { return err } From 4a37769a42636f7ee53ca5ae6fbc63aa9929b513 Mon Sep 17 00:00:00 2001 From: ffranr Date: Mon, 27 Nov 2023 12:52:59 +0000 Subject: [PATCH 19/54] universe: add function to derive universe ID from bytes This commit adds a new function called `NewUniIDFromRawArgs`. The function can be used to derive a universe identifier from raw asset ID/group key bytes. We will use this function to derive a universe ID from data stored in the database. --- universe/interface.go | 60 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/universe/interface.go b/universe/interface.go index 9abb76878..15abcea50 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -84,6 +84,66 @@ func (i *Identifier) StringForLog() string { i.String(), i.AssetID[:], groupKey, i.ProofType) } +// NewUniIDFromRawArgs creates a new universe ID from the raw arguments. The +// asset ID bytes and group key bytes are mutually exclusive. If the group key +// bytes are set, then the asset ID bytes will be ignored. +// This function is useful in deriving a universe ID from the data stored in the +// database. 
+func NewUniIDFromRawArgs(assetIDBytes []byte, groupKeyBytes []byte, + proofTypeStr string) (Identifier, error) { + + proofType, err := ParseStrProofType(proofTypeStr) + if err != nil { + return Identifier{}, err + } + + // If the group key bytes are set, then we'll preferentially populate + // the universe ID with that and not the asset ID. + if len(groupKeyBytes) != 0 { + groupKey, err := parseGroupKey(groupKeyBytes) + if err != nil { + return Identifier{}, fmt.Errorf("unable to parse "+ + "group key: %w", err) + } + return Identifier{ + GroupKey: groupKey, + ProofType: proofType, + }, nil + } + + // At this point we know that the group key bytes are nil, so we'll + // attempt to parse the asset ID bytes. + if len(assetIDBytes) == 0 { + return Identifier{}, fmt.Errorf("asset ID bytes and group " + + "key bytes are both nil") + } + + var assetID asset.ID + copy(assetID[:], assetIDBytes) + + return Identifier{ + AssetID: assetID, + ProofType: proofType, + }, nil +} + +// parseGroupKey parses a group key from bytes, which can be in either the +// Schnorr or Compressed format. +func parseGroupKey(scriptKey []byte) (*btcec.PublicKey, error) { + switch len(scriptKey) { + case schnorr.PubKeyBytesLen: + return schnorr.ParsePubKey(scriptKey) + + // Truncate the key and then parse as a Schnorr key. + case btcec.PubKeyBytesLenCompressed: + return schnorr.ParsePubKey(scriptKey[1:]) + + default: + return nil, fmt.Errorf("unknown script key length: %v", + len(scriptKey)) + } +} + // ValidateProofUniverseType validates that the proof type matches the universe // identifier proof type. func ValidateProofUniverseType(a *asset.Asset, uniID Identifier) error { From ec4203bd369517dfe74f28e6060faf970d642cfd Mon Sep 17 00:00:00 2001 From: ffranr Date: Mon, 27 Nov 2023 12:03:44 +0000 Subject: [PATCH 20/54] universe+tapdb: add universe federation proof sync log table This commit also adds log query/upserts SQL statements. --- ...00013_universe_fed_proof_sync_log.down.sql | 2 + .../000013_universe_fed_proof_sync_log.up.sql | 36 +++ tapdb/sqlc/models.go | 11 + tapdb/sqlc/querier.go | 4 + tapdb/sqlc/queries/universe.sql | 98 +++++- tapdb/sqlc/universe.sql.go | 198 ++++++++++++ tapdb/universe_federation.go | 297 ++++++++++++++++++ universe/interface.go | 103 +++++- 8 files changed, 746 insertions(+), 3 deletions(-) create mode 100644 tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql create mode 100644 tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql diff --git a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql new file mode 100644 index 000000000..42bdbfbb8 --- /dev/null +++ b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS federation_proof_sync_log_unique_index_proof_leaf_id_servers_id; +DROP TABLE IF EXISTS federation_proof_sync_log; \ No newline at end of file diff --git a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql new file mode 100644 index 000000000..ba7e7c100 --- /dev/null +++ b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql @@ -0,0 +1,36 @@ +-- This table stores the log of federation universe proof sync attempts. Rows +-- in this table are specific to a given proof leaf, server, and sync direction. +CREATE TABLE IF NOT EXISTS federation_proof_sync_log ( + id BIGINT PRIMARY KEY, + + -- The status of the proof sync attempt. 
+ status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + + -- The timestamp of when the log entry for the associated proof was last + -- updated. + timestamp TIMESTAMP NOT NULL, + + -- The number of attempts that have been made to sync the proof. + attempt_counter BIGINT NOT NULL DEFAULT 0, + + -- The direction of the proof sync attempt. + sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), + + -- The ID of the subject proof leaf. + proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id), + + -- The ID of the universe that the proof leaf belongs to. + universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + + -- The ID of the server that the proof will be/was synced to. + servers_id BIGINT NOT NULL REFERENCES universe_servers(id) +); + +-- Create a unique index on table federation_proof_sync_log +CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id +ON federation_proof_sync_log ( + sync_direction, + proof_leaf_id, + universe_root_id, + servers_id +); \ No newline at end of file diff --git a/tapdb/sqlc/models.go b/tapdb/sqlc/models.go index b89980ab2..26eef9b5d 100644 --- a/tapdb/sqlc/models.go +++ b/tapdb/sqlc/models.go @@ -164,6 +164,17 @@ type FederationGlobalSyncConfig struct { AllowSyncExport bool } +type FederationProofSyncLog struct { + ID int64 + Status string + Timestamp time.Time + AttemptCounter int64 + SyncDirection string + ProofLeafID int64 + UniverseRootID int64 + ServersID int64 +} + type FederationUniSyncConfig struct { Namespace string AssetID []byte diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index 1a64f4291..c20633c10 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -121,6 +121,9 @@ type Querier interface { QueryAssets(ctx context.Context, arg QueryAssetsParams) ([]QueryAssetsRow, error) QueryEventIDs(ctx context.Context, arg QueryEventIDsParams) ([]QueryEventIDsRow, error) QueryFederationGlobalSyncConfigs(ctx context.Context) ([]FederationGlobalSyncConfig, error) + // Join on mssmt_nodes to get leaf related fields. + // Join on genesis_info_view to get leaf related fields. 
+ QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error) QueryFederationUniSyncConfigs(ctx context.Context) ([]FederationUniSyncConfig, error) QueryPassiveAssets(ctx context.Context, transferID int64) ([]QueryPassiveAssetsRow, error) QueryProofTransferAttempts(ctx context.Context, arg QueryProofTransferAttemptsParams) ([]time.Time, error) @@ -145,6 +148,7 @@ type Querier interface { UpsertAssetProof(ctx context.Context, arg UpsertAssetProofParams) error UpsertChainTx(ctx context.Context, arg UpsertChainTxParams) (int64, error) UpsertFederationGlobalSyncConfig(ctx context.Context, arg UpsertFederationGlobalSyncConfigParams) error + UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFederationProofSyncLogParams) (int64, error) UpsertFederationUniSyncConfig(ctx context.Context, arg UpsertFederationUniSyncConfigParams) error UpsertGenesisAsset(ctx context.Context, arg UpsertGenesisAssetParams) (int64, error) UpsertGenesisPoint(ctx context.Context, prevOut []byte) (int64, error) diff --git a/tapdb/sqlc/queries/universe.sql b/tapdb/sqlc/queries/universe.sql index 3376cd0bf..3fd9a75b2 100644 --- a/tapdb/sqlc/queries/universe.sql +++ b/tapdb/sqlc/queries/universe.sql @@ -364,4 +364,100 @@ ON CONFLICT(namespace) -- name: QueryFederationUniSyncConfigs :many SELECT namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export FROM federation_uni_sync_config -ORDER BY group_key NULLS LAST, asset_id NULLS LAST, proof_type; \ No newline at end of file +ORDER BY group_key NULLS LAST, asset_id NULLS LAST, proof_type; + +-- name: UpsertFederationProofSyncLog :one +INSERT INTO federation_proof_sync_log as log ( + status, timestamp, sync_direction, proof_leaf_id, universe_root_id, + servers_id +) VALUES ( + @status, @timestamp, @sync_direction, + ( + -- Select the leaf id from the universe_leaves table. + SELECT id + FROM universe_leaves + WHERE leaf_node_namespace = @leaf_namespace + AND minting_point = @leaf_minting_point_bytes + AND script_key_bytes = @leaf_script_key_bytes + LIMIT 1 + ), + ( + -- Select the universe root id from the universe_roots table. + SELECT id + FROM universe_roots + WHERE namespace_root = @universe_id_namespace + LIMIT 1 + ), + ( + -- Select the server id from the universe_servers table. + SELECT id + FROM universe_servers + WHERE server_host = @server_host + LIMIT 1 + ) +) ON CONFLICT (sync_direction, proof_leaf_id, universe_root_id, servers_id) +DO UPDATE SET + status = EXCLUDED.status, + timestamp = EXCLUDED.timestamp, + -- Increment the attempt counter. + attempt_counter = CASE + WHEN @bump_sync_attempt_counter = true THEN log.attempt_counter + 1 + ELSE log.attempt_counter + END +RETURNING id; + +-- name: QueryFederationProofSyncLog :many +SELECT + log.id, status, timestamp, sync_direction, attempt_counter, + + -- Select fields from the universe_servers table. + server.id as server_id, + server.server_host, + + -- Select universe leaf related fields. + leaf.minting_point as leaf_minting_point_bytes, + leaf.script_key_bytes as leaf_script_key_bytes, + mssmt_node.value as leaf_genesis_proof, + genesis.gen_asset_id as leaf_gen_asset_id, + genesis.asset_id as leaf_asset_id, + + -- Select fields from the universe_roots table. 
+ root.asset_id as uni_asset_id, + root.group_key as uni_group_key, + root.proof_type as uni_proof_type + +FROM federation_proof_sync_log as log + +JOIN universe_leaves as leaf + ON leaf.id = log.proof_leaf_id + +-- Join on mssmt_nodes to get leaf related fields. +JOIN mssmt_nodes mssmt_node + ON leaf.leaf_node_key = mssmt_node.key AND + leaf.leaf_node_namespace = mssmt_node.namespace + +-- Join on genesis_info_view to get leaf related fields. +JOIN genesis_info_view genesis + ON leaf.asset_genesis_id = genesis.gen_asset_id + +JOIN universe_servers as server + ON server.id = log.servers_id + +JOIN universe_roots as root + ON root.id = log.universe_root_id + +WHERE (log.sync_direction = sqlc.narg('sync_direction') + OR sqlc.narg('sync_direction') IS NULL) + AND + (log.status = sqlc.narg('status') OR sqlc.narg('status') IS NULL) + AND + + -- Universe leaves WHERE clauses. + (leaf.leaf_node_namespace = sqlc.narg('leaf_namespace') + OR sqlc.narg('leaf_namespace') IS NULL) + AND + (leaf.minting_point = sqlc.narg('leaf_minting_point_bytes') + OR sqlc.narg('leaf_minting_point_bytes') IS NULL) + AND + (leaf.script_key_bytes = sqlc.narg('leaf_script_key_bytes') + OR sqlc.narg('leaf_script_key_bytes') IS NULL); \ No newline at end of file diff --git a/tapdb/sqlc/universe.sql.go b/tapdb/sqlc/universe.sql.go index a0e7167af..9c5c21339 100644 --- a/tapdb/sqlc/universe.sql.go +++ b/tapdb/sqlc/universe.sql.go @@ -402,6 +402,134 @@ func (q *Queries) QueryFederationGlobalSyncConfigs(ctx context.Context) ([]Feder return items, nil } +const queryFederationProofSyncLog = `-- name: QueryFederationProofSyncLog :many +SELECT + log.id, status, timestamp, sync_direction, attempt_counter, + + -- Select fields from the universe_servers table. + server.id as server_id, + server.server_host, + + -- Select universe leaf related fields. + leaf.minting_point as leaf_minting_point_bytes, + leaf.script_key_bytes as leaf_script_key_bytes, + mssmt_node.value as leaf_genesis_proof, + genesis.gen_asset_id as leaf_gen_asset_id, + genesis.asset_id as leaf_asset_id, + + -- Select fields from the universe_roots table. + root.asset_id as uni_asset_id, + root.group_key as uni_group_key, + root.proof_type as uni_proof_type + +FROM federation_proof_sync_log as log + +JOIN universe_leaves as leaf + ON leaf.id = log.proof_leaf_id + +JOIN mssmt_nodes mssmt_node + ON leaf.leaf_node_key = mssmt_node.key AND + leaf.leaf_node_namespace = mssmt_node.namespace + +JOIN genesis_info_view genesis + ON leaf.asset_genesis_id = genesis.gen_asset_id + +JOIN universe_servers as server + ON server.id = log.servers_id + +JOIN universe_roots as root + ON root.id = log.universe_root_id + +WHERE (log.sync_direction = $1 + OR $1 IS NULL) + AND + (log.status = $2 OR $2 IS NULL) + AND + + -- Universe leaves WHERE clauses. 
+ (leaf.leaf_node_namespace = $3 + OR $3 IS NULL) + AND + (leaf.minting_point = $4 + OR $4 IS NULL) + AND + (leaf.script_key_bytes = $5 + OR $5 IS NULL) +` + +type QueryFederationProofSyncLogParams struct { + SyncDirection sql.NullString + Status sql.NullString + LeafNamespace sql.NullString + LeafMintingPointBytes []byte + LeafScriptKeyBytes []byte +} + +type QueryFederationProofSyncLogRow struct { + ID int64 + Status string + Timestamp time.Time + SyncDirection string + AttemptCounter int64 + ServerID int64 + ServerHost string + LeafMintingPointBytes []byte + LeafScriptKeyBytes []byte + LeafGenesisProof []byte + LeafGenAssetID int64 + LeafAssetID []byte + UniAssetID []byte + UniGroupKey []byte + UniProofType string +} + +// Join on mssmt_nodes to get leaf related fields. +// Join on genesis_info_view to get leaf related fields. +func (q *Queries) QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error) { + rows, err := q.db.QueryContext(ctx, queryFederationProofSyncLog, + arg.SyncDirection, + arg.Status, + arg.LeafNamespace, + arg.LeafMintingPointBytes, + arg.LeafScriptKeyBytes, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []QueryFederationProofSyncLogRow + for rows.Next() { + var i QueryFederationProofSyncLogRow + if err := rows.Scan( + &i.ID, + &i.Status, + &i.Timestamp, + &i.SyncDirection, + &i.AttemptCounter, + &i.ServerID, + &i.ServerHost, + &i.LeafMintingPointBytes, + &i.LeafScriptKeyBytes, + &i.LeafGenesisProof, + &i.LeafGenAssetID, + &i.LeafAssetID, + &i.UniAssetID, + &i.UniGroupKey, + &i.UniProofType, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const queryFederationUniSyncConfigs = `-- name: QueryFederationUniSyncConfigs :many SELECT namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export FROM federation_uni_sync_config @@ -877,6 +1005,76 @@ func (q *Queries) UpsertFederationGlobalSyncConfig(ctx context.Context, arg Upse return err } +const upsertFederationProofSyncLog = `-- name: UpsertFederationProofSyncLog :one +INSERT INTO federation_proof_sync_log as log ( + status, timestamp, sync_direction, proof_leaf_id, universe_root_id, + servers_id +) VALUES ( + $1, $2, $3, + ( + -- Select the leaf id from the universe_leaves table. + SELECT id + FROM universe_leaves + WHERE leaf_node_namespace = $4 + AND minting_point = $5 + AND script_key_bytes = $6 + LIMIT 1 + ), + ( + -- Select the universe root id from the universe_roots table. + SELECT id + FROM universe_roots + WHERE namespace_root = $7 + LIMIT 1 + ), + ( + -- Select the server id from the universe_servers table. + SELECT id + FROM universe_servers + WHERE server_host = $8 + LIMIT 1 + ) +) ON CONFLICT (sync_direction, proof_leaf_id, universe_root_id, servers_id) +DO UPDATE SET + status = EXCLUDED.status, + timestamp = EXCLUDED.timestamp, + -- Increment the attempt counter. 
+ attempt_counter = CASE + WHEN $9 = true THEN log.attempt_counter + 1 + ELSE log.attempt_counter + END +RETURNING id +` + +type UpsertFederationProofSyncLogParams struct { + Status string + Timestamp time.Time + SyncDirection string + LeafNamespace string + LeafMintingPointBytes []byte + LeafScriptKeyBytes []byte + UniverseIDNamespace string + ServerHost string + BumpSyncAttemptCounter interface{} +} + +func (q *Queries) UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFederationProofSyncLogParams) (int64, error) { + row := q.db.QueryRowContext(ctx, upsertFederationProofSyncLog, + arg.Status, + arg.Timestamp, + arg.SyncDirection, + arg.LeafNamespace, + arg.LeafMintingPointBytes, + arg.LeafScriptKeyBytes, + arg.UniverseIDNamespace, + arg.ServerHost, + arg.BumpSyncAttemptCounter, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + const upsertFederationUniSyncConfig = `-- name: UpsertFederationUniSyncConfig :exec INSERT INTO federation_uni_sync_config ( namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export diff --git a/tapdb/universe_federation.go b/tapdb/universe_federation.go index c306baf30..a7b5d49f3 100644 --- a/tapdb/universe_federation.go +++ b/tapdb/universe_federation.go @@ -1,7 +1,9 @@ package tapdb import ( + "bytes" "context" + "database/sql" "errors" "fmt" "sort" @@ -9,8 +11,11 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" + "github.com/btcsuite/btcd/wire" "github.com/lightninglabs/taproot-assets/asset" "github.com/lightninglabs/taproot-assets/fn" + "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightninglabs/taproot-assets/universe" "github.com/lightningnetwork/lnd/clock" @@ -19,6 +24,17 @@ import ( ) type ( + // UpsertFedProofSyncLogParams is used to upsert federation proof sync + // logs. + UpsertFedProofSyncLogParams = sqlc.UpsertFederationProofSyncLogParams + + // QueryFedProofSyncLogParams is used to query for federation proof sync + // logs. + QueryFedProofSyncLogParams = sqlc.QueryFederationProofSyncLogParams + + // ProofSyncLogEntry is a single entry from the proof sync log. + ProofSyncLogEntry = sqlc.QueryFederationProofSyncLogRow + // NewUniverseServer is used to create a new universe server. NewUniverseServer = sqlc.InsertUniverseServerParams @@ -62,6 +78,22 @@ var ( } ) +// FederationProofSyncLogStore is used to log the sync status of individual +// universe proofs. +type FederationProofSyncLogStore interface { + BaseUniverseStore + + // UpsertFederationProofSyncLog upserts a proof sync log entry for a + // given proof leaf and server. + UpsertFederationProofSyncLog(ctx context.Context, + arg UpsertFedProofSyncLogParams) (int64, error) + + // QueryFederationProofSyncLog returns the set of proof sync logs for a + // given proof leaf. + QueryFederationProofSyncLog(ctx context.Context, + arg QueryFedProofSyncLogParams) ([]ProofSyncLogEntry, error) +} + // FederationSyncConfigStore is used to manage the set of Universe servers as // part of a federation. type FederationSyncConfigStore interface { @@ -90,6 +122,7 @@ type FederationSyncConfigStore interface { // of a federation. type UniverseServerStore interface { FederationSyncConfigStore + FederationProofSyncLogStore // InsertUniverseServer inserts a new universe server in to the DB. 
InsertUniverseServer(ctx context.Context, arg NewUniverseServer) error @@ -268,6 +301,270 @@ func (u *UniverseFederationDB) LogNewSyncs(ctx context.Context, }) } +// UpsertFederationProofSyncLog upserts a federation proof sync log entry for a +// given universe server and proof. +func (u *UniverseFederationDB) UpsertFederationProofSyncLog( + ctx context.Context, uniID universe.Identifier, + leafKey universe.LeafKey, addr universe.ServerAddr, + syncDirection universe.SyncDirection, + syncStatus universe.ProofSyncStatus, + bumpSyncAttemptCounter bool) (int64, error) { + + // Encode the leaf key outpoint as bytes. We'll use this to look up the + // leaf ID in the DB. + leafKeyOutpointBytes, err := encodeOutpoint(leafKey.OutPoint) + if err != nil { + return 0, err + } + + // Encode the leaf script key pub key as bytes. We'll use this to look + // up the leaf ID in the DB. + scriptKeyPubKeyBytes := schnorr.SerializePubKey( + leafKey.ScriptKey.PubKey, + ) + + var ( + writeTx UniverseFederationOptions + logID int64 + ) + + err = u.db.ExecTx(ctx, &writeTx, func(db UniverseServerStore) error { + params := UpsertFedProofSyncLogParams{ + Status: string(syncStatus), + Timestamp: time.Now().UTC(), + SyncDirection: string(syncDirection), + UniverseIDNamespace: uniID.String(), + LeafNamespace: uniID.String(), + LeafMintingPointBytes: leafKeyOutpointBytes, + LeafScriptKeyBytes: scriptKeyPubKeyBytes, + ServerHost: addr.HostStr(), + BumpSyncAttemptCounter: bumpSyncAttemptCounter, + } + logID, err = db.UpsertFederationProofSyncLog(ctx, params) + if err != nil { + return err + } + + return nil + }) + + return logID, err +} + +// QueryFederationProofSyncLog queries the federation proof sync log and returns +// the log entries which correspond to the given universe proof leaf. +func (u *UniverseFederationDB) QueryFederationProofSyncLog( + ctx context.Context, uniID universe.Identifier, + leafKey universe.LeafKey, + syncDirection universe.SyncDirection, + syncStatus universe.ProofSyncStatus) ([]*universe.ProofSyncLogEntry, + error) { + + // Encode the leaf key outpoint as bytes. We'll use this to look up the + // leaf ID in the DB. + leafKeyOutpointBytes, err := encodeOutpoint(leafKey.OutPoint) + if err != nil { + return nil, err + } + + // Encode the leaf script key pub key as bytes. We'll use this to look + // up the leaf ID in the DB. + scriptKeyPubKeyBytes := schnorr.SerializePubKey( + leafKey.ScriptKey.PubKey, + ) + + var ( + readTx = NewUniverseFederationReadTx() + proofSyncLogs []*universe.ProofSyncLogEntry + ) + + err = u.db.ExecTx(ctx, &readTx, func(db UniverseServerStore) error { + params := QueryFedProofSyncLogParams{ + SyncDirection: sqlStr(string(syncDirection)), + Status: sqlStr(string(syncStatus)), + LeafNamespace: sqlStr(uniID.String()), + LeafMintingPointBytes: leafKeyOutpointBytes, + LeafScriptKeyBytes: scriptKeyPubKeyBytes, + } + logEntries, err := db.QueryFederationProofSyncLog(ctx, params) + + // Parse database proof sync logs. Multiple log entries may + // exist for a given leaf because each log entry is unique to a + // server. 
+ proofSyncLogs = make( + []*universe.ProofSyncLogEntry, 0, len(logEntries), + ) + for idx := range logEntries { + entry := logEntries[idx] + + parsedLogEntry, err := fetchProofSyncLogEntry( + ctx, entry, db, + ) + if err != nil { + return err + } + + proofSyncLogs = append(proofSyncLogs, parsedLogEntry) + } + + return err + }) + if err != nil { + return nil, err + } + + return proofSyncLogs, nil +} + +// FetchPendingProofsSyncLog queries the federation proof sync log and returns +// all log entries with sync status pending. +func (u *UniverseFederationDB) FetchPendingProofsSyncLog(ctx context.Context, + syncDirection *universe.SyncDirection) ([]*universe.ProofSyncLogEntry, + error) { + + var ( + readTx = NewUniverseFederationReadTx() + proofSyncLogs []*universe.ProofSyncLogEntry + ) + + err := u.db.ExecTx(ctx, &readTx, func(db UniverseServerStore) error { + // If the sync direction is not set, then we'll query for all + // pending proof sync log entries. + var sqlSyncDirection sql.NullString + if syncDirection != nil { + sqlSyncDirection = sqlStr(string(*syncDirection)) + } + + sqlProofSyncStatus := sqlStr( + string(universe.ProofSyncStatusPending), + ) + + params := QueryFedProofSyncLogParams{ + SyncDirection: sqlSyncDirection, + Status: sqlProofSyncStatus, + } + logEntries, err := db.QueryFederationProofSyncLog(ctx, params) + if err != nil { + return fmt.Errorf("unable to query proof sync log: %w", + err) + } + + // Parse log entries from database row. + proofSyncLogs = make( + []*universe.ProofSyncLogEntry, 0, len(logEntries), + ) + for idx := range logEntries { + entry := logEntries[idx] + + parsedLogEntry, err := fetchProofSyncLogEntry( + ctx, entry, db, + ) + if err != nil { + return err + } + + proofSyncLogs = append(proofSyncLogs, parsedLogEntry) + } + + return nil + }) + if err != nil { + return nil, err + } + + return proofSyncLogs, nil +} + +// fetchProofSyncLogEntry returns a proof sync log entry given a DB row. +func fetchProofSyncLogEntry(ctx context.Context, entry ProofSyncLogEntry, + dbTx UniverseServerStore) (*universe.ProofSyncLogEntry, error) { + + // Fetch asset genesis for the leaf. + leafAssetGen, err := fetchGenesis(ctx, dbTx, entry.LeafGenAssetID) + if err != nil { + return nil, err + } + + // We only need to obtain the asset at this point, so we'll do a sparse + // decode here to decode only the asset record. + var leafAsset asset.Asset + assetRecord := proof.AssetLeafRecord(&leafAsset) + err = proof.SparseDecode( + bytes.NewReader(entry.LeafGenesisProof), assetRecord, + ) + if err != nil { + return nil, fmt.Errorf("unable to decode proof: %w", err) + } + + leaf := &universe.Leaf{ + GenesisWithGroup: universe.GenesisWithGroup{ + Genesis: leafAssetGen, + GroupKey: leafAsset.GroupKey, + }, + RawProof: entry.LeafGenesisProof, + Asset: &leafAsset, + Amt: leafAsset.Amount, + } + + // Parse leaf key from leaf DB row. + scriptKeyPub, err := schnorr.ParsePubKey( + entry.LeafScriptKeyBytes, + ) + if err != nil { + return nil, err + } + scriptKey := asset.NewScriptKey(scriptKeyPub) + + var outPoint wire.OutPoint + err = readOutPoint( + bytes.NewReader(entry.LeafMintingPointBytes), 0, 0, + &outPoint, + ) + if err != nil { + return nil, err + } + + leafKey := universe.LeafKey{ + OutPoint: outPoint, + ScriptKey: &scriptKey, + } + + // Parse server address from DB row. + serverAddr := universe.NewServerAddr(entry.ServerID, entry.ServerHost) + + // Parse proof sync status directly from the DB row. 
+ status, err := universe.ParseStrProofSyncStatus(entry.Status) + if err != nil { + return nil, err + } + + // Parse proof sync direction directly from the DB row. + direction, err := universe.ParseStrSyncDirection(entry.SyncDirection) + if err != nil { + return nil, err + } + + uniID, err := universe.NewUniIDFromRawArgs( + entry.UniAssetID, entry.UniGroupKey, + entry.UniProofType, + ) + if err != nil { + return nil, err + } + + return &universe.ProofSyncLogEntry{ + Timestamp: entry.Timestamp, + SyncStatus: status, + SyncDirection: direction, + AttemptCounter: entry.AttemptCounter, + ServerAddr: serverAddr, + + UniID: uniID, + LeafKey: leafKey, + Leaf: *leaf, + }, nil +} + // UpsertFederationSyncConfig upserts both the global and universe specific // federation sync configs. func (u *UniverseFederationDB) UpsertFederationSyncConfig( diff --git a/universe/interface.go b/universe/interface.go index 15abcea50..0e8ae8181 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -763,10 +763,109 @@ type FederationSyncConfigDB interface { uniSyncConfigs []*FedUniSyncConfig) error } -// FederationDB is used for CRUD operations related to federation sync config -// and tracked servers. +// SyncDirection is the direction of a proof sync. +type SyncDirection string + +const ( + // SyncDirectionPush indicates that the sync is a push sync (from the local + // server to the remote server). + SyncDirectionPush SyncDirection = "push" + + // SyncDirectionPull indicates that the sync is a pull sync (from the remote + // server to the local server). + SyncDirectionPull SyncDirection = "pull" +) + +// ParseStrSyncDirection parses a string into a SyncDirection. +func ParseStrSyncDirection(s string) (SyncDirection, error) { + switch s { + case string(SyncDirectionPush): + return SyncDirectionPush, nil + case string(SyncDirectionPull): + return SyncDirectionPull, nil + default: + return "", fmt.Errorf("unknown sync direction: %v", s) + } +} + +// ProofSyncStatus is the status of a proof sync. +type ProofSyncStatus string + +const ( + // ProofSyncStatusPending indicates that the sync is pending. + ProofSyncStatusPending ProofSyncStatus = "pending" + + // ProofSyncStatusComplete indicates that the sync is complete. + ProofSyncStatusComplete ProofSyncStatus = "complete" +) + +// ParseStrProofSyncStatus parses a string into a ProofSyncStatus. +func ParseStrProofSyncStatus(s string) (ProofSyncStatus, error) { + switch s { + case string(ProofSyncStatusPending): + return ProofSyncStatusPending, nil + case string(ProofSyncStatusComplete): + return ProofSyncStatusComplete, nil + default: + return "", fmt.Errorf("unknown proof sync status: %v", s) + } +} + +// ProofSyncLogEntry is a log entry for a proof sync. +type ProofSyncLogEntry struct { + // Timestamp is the timestamp of the log entry. + Timestamp time.Time + + // SyncStatus is the status of the sync. + SyncStatus ProofSyncStatus + + // SyncDirection is the direction of the sync. + SyncDirection SyncDirection + + // AttemptCounter is the number of times the sync has been attempted. + AttemptCounter int64 + + // ServerAddr is the address of the sync counterparty server. + ServerAddr ServerAddr + + // UniID is the identifier of the universe associated with the sync event. + UniID Identifier + + // LeafKey is the leaf key associated with the sync event. + LeafKey LeafKey + + // Leaf is the leaf associated with the sync event. + Leaf Leaf +} + +// FederationProofSyncLog is used for CRUD operations relating to the federation +// proof sync log. 
+type FederationProofSyncLog interface { + // UpsertFederationProofSyncLog upserts a federation proof sync log + // entry for a given universe server and proof. + UpsertFederationProofSyncLog(ctx context.Context, uniID Identifier, + leafKey LeafKey, addr ServerAddr, syncDirection SyncDirection, + syncStatus ProofSyncStatus, + bumpSyncAttemptCounter bool) (int64, error) + + // QueryFederationProofSyncLog queries the federation proof sync log and + // returns the log entries which correspond to the given universe proof + // leaf. + QueryFederationProofSyncLog(ctx context.Context, uniID Identifier, + leafKey LeafKey, syncDirection SyncDirection, + syncStatus ProofSyncStatus) ([]*ProofSyncLogEntry, error) + + // FetchPendingProofsSyncLog queries the federation proof sync log and + // returns all log entries with sync status pending. + FetchPendingProofsSyncLog(ctx context.Context, + syncDirection *SyncDirection) ([]*ProofSyncLogEntry, error) +} + +// FederationDB is used for CRUD operations related to federation logs and +// configuration. type FederationDB interface { FederationLog + FederationProofSyncLog FederationSyncConfigDB } From 825b7c7636213b8f9e14927f841243395ae001ef Mon Sep 17 00:00:00 2001 From: ffranr Date: Mon, 27 Nov 2023 14:33:48 +0000 Subject: [PATCH 21/54] universe: use proof log db table in federation envoy This commit adds a flag to the federation universe push request to indicate that the proof leaf sync attempt should be logged and actively managed to ensure that the federation push procedure is repeated in the event of a failure. --- tapgarden/caretaker.go | 5 + universe/auto_syncer.go | 319 +++++++++++++++++++++++++++++++++++----- universe/interface.go | 6 + 3 files changed, 290 insertions(+), 40 deletions(-) diff --git a/tapgarden/caretaker.go b/tapgarden/caretaker.go index fb1c1c65a..78b867174 100644 --- a/tapgarden/caretaker.go +++ b/tapgarden/caretaker.go @@ -1179,6 +1179,11 @@ func (b *BatchCaretaker) storeMintingProof(ctx context.Context, ID: uniID, Key: leafKey, Leaf: mintingLeaf, + + // We set this to true to indicate that we would like the syncer + // to log and reattempt (in the event of a failure) to push sync + // this proof leaf. + LogProofSync: true, }, nil } diff --git a/universe/auto_syncer.go b/universe/auto_syncer.go index 72530ea23..23934d826 100644 --- a/universe/auto_syncer.go +++ b/universe/auto_syncer.go @@ -76,6 +76,12 @@ type FederationPushReq struct { // federation proof push was successful. resp chan *Proof + // LogProofSync is a boolean that indicates, if true, that the proof + // leaf sync attempt should be logged and actively managed to ensure + // that the federation push procedure is repeated in the event of a + // failure. + LogProofSync bool + err chan error } @@ -232,48 +238,105 @@ func (f *FederationEnvoy) syncServerState(ctx context.Context, return nil } -// pushProofToFederation attempts to push out a new proof to the current -// federation in parallel. -func (f *FederationEnvoy) pushProofToFederation(uniID Identifier, key LeafKey, - leaf *Leaf) { +// pushProofToServer attempts to push out a new proof to the target server. +func (f *FederationEnvoy) pushProofToServer(ctx context.Context, + uniID Identifier, key LeafKey, leaf *Leaf, addr ServerAddr) error { - // Fetch all universe servers in our federation. 
-	fedServers, err := f.tryFetchServers()
-	if err != nil || len(fedServers) == 0 {
-		return
+	remoteUniverseServer, err := f.cfg.NewRemoteRegistrar(addr)
+	if err != nil {
+		return fmt.Errorf("cannot push proof, unable to connect "+
+			"to remote server(%v): %w", addr.HostStr(), err)
 	}
 
-	log.Infof("Pushing new proof to %v federation members, proof_key=%v",
-		len(fedServers), spew.Sdump(key))
+	_, err = remoteUniverseServer.UpsertProofLeaf(
+		ctx, uniID, key, leaf,
+	)
+	if err != nil {
+		return fmt.Errorf("cannot push proof to remote "+
+			"server(%v): %w", addr.HostStr(), err)
+	}
 
-	ctx, cancel := f.WithCtxQuitNoTimeout()
-	defer cancel()
+	return nil
+}
+
+// pushProofToServerLogged attempts to push out a new proof to the target
+// server, and logs the sync attempt.
+func (f *FederationEnvoy) pushProofToServerLogged(ctx context.Context,
+	uniID Identifier, key LeafKey, leaf *Leaf, addr ServerAddr) error {
+
+	// Ensure that we have a pending sync log entry for this
+	// leaf and server pair. This will allow us to handle all
+	// pending syncs in the event of a restart or at a different
+	// point in the envoy.
+	_, err := f.cfg.FederationDB.UpsertFederationProofSyncLog(
+		ctx, uniID, key, addr, SyncDirectionPush,
+		ProofSyncStatusPending, true,
+	)
+	if err != nil {
+		return fmt.Errorf("unable to log proof sync as pending: %w",
+			err)
+	}
+
+	// Push the proof to the remote server.
+	err = f.pushProofToServer(ctx, uniID, key, leaf, addr)
+	if err != nil {
+		return fmt.Errorf("cannot push proof to remote server(%v): %w",
+			addr.HostStr(), err)
+	}
+
+	// We did not encounter an error in our proof push
+	// attempt. Log the proof sync attempt as complete.
+	_, err = f.cfg.FederationDB.UpsertFederationProofSyncLog(
+		ctx, uniID, key, addr, SyncDirectionPush,
+		ProofSyncStatusComplete, false,
+	)
+	if err != nil {
+		return fmt.Errorf("unable to log proof sync attempt: %w", err)
+	}
+
+	return nil
+}
+
+// pushProofToFederation attempts to push out a new proof to the current
+// federation in parallel.
+func (f *FederationEnvoy) pushProofToFederation(ctx context.Context,
+	uniID Identifier, key LeafKey, leaf *Leaf, fedServers []ServerAddr,
+	logProofSync bool) {
+
+	log.Infof("Pushing proof to %v federation members, proof_key=%v",
+		len(fedServers), spew.Sdump(key))
 
 	// To push a new proof out, we'll attempt to dial to the remote
 	// registrar, then will attempt to push the new proof directly to the
 	// register.
 	pushNewProof := func(ctx context.Context, addr ServerAddr) error {
-		remoteUniverseServer, err := f.cfg.NewRemoteRegistrar(addr)
-		if err != nil {
-			log.Warnf("cannot push proof unable to connect "+
-				"to remote server(%v): %v", addr.HostStr(),
-				err)
+		// If we are logging proof sync attempts, we will use the
+		// logged version of the push function.
+		if logProofSync {
+			err := f.pushProofToServerLogged(
+				ctx, uniID, key, leaf, addr,
+			)
+			if err != nil {
+				log.Warnf("Cannot push proof via logged "+
+					"server push: %v", err)
+			}
+
 			return nil
 		}
 
-		_, err = remoteUniverseServer.UpsertProofLeaf(
-			ctx, uniID, key, leaf,
-		)
+		// If we are not logging proof sync attempts, we will use the
+		// non-logged version of the push function.
+		err := f.pushProofToServer(ctx, uniID, key, leaf, addr)
 		if err != nil {
-			log.Warnf("cannot push proof to remote "+
-				"server(%v): %v", addr.HostStr(), err)
+			log.Warnf("Cannot push proof: %v", err)
 		}
+
 		return nil
 	}
 
 	// To conclude, we'll attempt to push the new proof to all the universe
 	// servers in parallel.
- err = fn.ParSlice(ctx, fedServers, pushNewProof) + err := fn.ParSlice(ctx, fedServers, pushNewProof) if err != nil { // TODO(roasbeef): retry in the background until successful? log.Errorf("unable to push proof to federation: %v", err) @@ -281,6 +344,57 @@ func (f *FederationEnvoy) pushProofToFederation(uniID Identifier, key LeafKey, } } +// filterProofSyncPending filters out servers that have already been synced +// with for the given leaf. +func (f *FederationEnvoy) filterProofSyncPending(fedServers []ServerAddr, + uniID Identifier, key LeafKey) ([]ServerAddr, error) { + + // If there are no servers to filter, then we'll return early. This + // saves from querying the database unnecessarily. + if len(fedServers) == 0 { + return nil, nil + } + + ctx, cancel := f.WithCtxQuit() + defer cancel() + + // Select all sync push complete log entries for the given universe + // leaf. If there are any servers which are sync complete within this + // log set, we will filter them out of our target server set. + logs, err := f.cfg.FederationDB.QueryFederationProofSyncLog( + ctx, uniID, key, SyncDirectionPush, + ProofSyncStatusComplete, + ) + if err != nil { + return nil, fmt.Errorf("unable to query federation sync log: %w", + err) + } + + // Construct a map of servers that have already been synced with for the + // given leaf. + syncedServers := make(map[string]struct{}) + for idx := range logs { + logEntry := logs[idx] + syncedServers[logEntry.ServerAddr.HostStr()] = struct{}{} + } + + // Filter out servers that we've already pushed to. + filteredFedServers := fn.Filter(fedServers, func(a ServerAddr) bool { + // Filter out servers that have a log entry with sync status + // complete. + if _, ok := syncedServers[a.HostStr()]; ok { + return false + } + + // By this point we haven't found logs corresponding to the + // given server, we will therefore return true and include the + // server as a sync target for the given leaf. + return true + }) + + return filteredFedServers, nil +} + // syncer is the main goroutine that's responsible for interacting with the // federation envoy. It also accepts incoming requests to push out new updates // to the federation. @@ -294,12 +408,24 @@ func (f *FederationEnvoy) syncer() { syncTicker := time.NewTicker(f.cfg.SyncInterval) defer syncTicker.Stop() + // We'll use a timeout that's slightly less than the sync interval to + // help avoid ticking into a new sync event before the previous event + // has finished. + syncContextTimeout := f.cfg.SyncInterval - 1*time.Second + if syncContextTimeout < 0 { + // If the sync interval is less than a second, then we'll use + // the sync interval as the timeout. + syncContextTimeout = f.cfg.SyncInterval + } + for { select { // A new sync event has just been triggered, so we'll attempt // to synchronize state with all the active universe servers in // the federation. case <-syncTicker.C: + log.Debug("Federation envoy handling new tick event") + // Error propagation is handled in tryFetchServers, we // only need to exit here. fedServers, err := f.tryFetchServers() @@ -318,11 +444,60 @@ func (f *FederationEnvoy) syncer() { continue } + // After we've synced with the federation, we'll + // attempt to push out any pending proofs that we + // haven't yet completed. 
+			ctxFetchLog, cancelFetchLog := f.WithCtxQuitNoTimeout()
+			syncDirection := SyncDirectionPush
+			db := f.cfg.FederationDB
+			logEntries, err := db.FetchPendingProofsSyncLog(
+				ctxFetchLog, &syncDirection,
+			)
+			cancelFetchLog()
+			if err != nil {
+				log.Warnf("unable to query pending push "+
+					"sync log: %v", err)
+				continue
+			}
+
+			if len(logEntries) > 0 {
+				log.Debugf("Handling pending proof sync log "+
+					"entries (entries_count=%d)",
+					len(logEntries))
+			}
+
+			// TODO(ffranr): Take account of any new servers that
+			// have been added since the last time we populated the
+			// log for a given proof leaf. Pending proof sync log
+			// entries are only relevant for the set of servers
+			// that existed at the time the log entry was created.
+			// If a new server is added, then we should create a
+			// new log entry for the new server.
+
+			for idx := range logEntries {
+				entry := logEntries[idx]
+
+				servers := []ServerAddr{
+					entry.ServerAddr,
+				}
+
+				ctxPush, cancelPush :=
+					f.CtxBlockingCustomTimeout(
+						syncContextTimeout,
+					)
+				f.pushProofToFederation(
+					ctxPush, entry.UniID, entry.LeafKey,
+					&entry.Leaf, servers, true,
+				)
+				cancelPush()
+			}
+
 		// A new push request has just arrived. We'll perform a
 		// asynchronous registration with the local Universe registrar,
 		// then push it out in an async manner to the federation
 		// members.
 		case pushReq := <-f.pushRequests:
+			log.Debug("Federation envoy handling push request")
 			ctx, cancel := f.WithCtxQuit()
 
 			// First, we'll attempt to registrar the proof leaf with
@@ -346,13 +521,53 @@ func (f *FederationEnvoy) syncer() {
 			// proof out to the federation in the background.
 			pushReq.resp <- newProof
 
-			// With the response sent above, we'll push this out to
-			// all the Universe servers in the background.
-			go f.pushProofToFederation(
-				pushReq.ID, pushReq.Key, pushReq.Leaf,
+			// Fetch all universe servers in our federation.
+			fedServers, err := f.tryFetchServers()
+			if err != nil {
+				err := fmt.Errorf("unable to fetch "+
+					"federation servers: %w", err)
+				log.Warnf(err.Error())
+				pushReq.err <- err
+				continue
+			}
+
+			if len(fedServers) == 0 {
+				log.Warnf("could not find any federation " +
+					"servers")
+				continue
+			}
+
+			if pushReq.LogProofSync {
+				// We are attempting to sync using the
+				// logged proof sync procedure. We will
+				// therefore narrow down the set of target
+				// servers based on the sync log. Only servers
+				// that are not yet push sync complete will be
+				// targeted.
+				fedServers, err = f.filterProofSyncPending(
+					fedServers, pushReq.ID, pushReq.Key,
+				)
+				if err != nil {
+					log.Warnf("failed to filter "+
+						"federation servers: %v", err)
+					continue
+				}
+			}
+
+			// With the response sent above, we'll push this
+			// out to all the Universe servers in the
+			// background.
+			ctxPush, cancelPush := f.WithCtxQuitNoTimeout()
+			f.pushProofToFederation(
+				ctxPush, pushReq.ID, pushReq.Key,
+				pushReq.Leaf, fedServers,
+				pushReq.LogProofSync,
 			)
+			cancelPush()
 
 		case pushReq := <-f.batchPushRequests:
+			log.Debug("Federation envoy handling batch push " +
+				"request")
 			ctx, cancel := f.WithCtxQuitNoTimeout()
 
 			// First, we'll attempt to registrar the proof leaf with
@@ -375,16 +590,34 @@ func (f *FederationEnvoy) syncer() {
 			// we'll return back to the caller.
 			pushReq.resp <- struct{}{}
 
+			// Fetch all universe servers in our federation.
+ fedServers, err := f.tryFetchServers() + if err != nil { + err := fmt.Errorf("unable to fetch "+ + "federation servers: %w", err) + log.Warnf(err.Error()) + pushReq.err <- err + continue + } + + if len(fedServers) == 0 { + log.Warnf("could not find any federation " + + "servers") + continue + } + // With the response sent above, we'll push this out to // all the Universe servers in the background. - go func() { - for idx := range pushReq.Batch { - item := pushReq.Batch[idx] - f.pushProofToFederation( - item.ID, item.Key, item.Leaf, - ) - } - }() + ctxPush, cancelPush := f.WithCtxQuitNoTimeout() + for idx := range pushReq.Batch { + item := pushReq.Batch[idx] + + f.pushProofToFederation( + ctxPush, item.ID, item.Key, item.Leaf, + fedServers, item.LogProofSync, + ) + } + cancelPush() case <-f.Quit: return @@ -400,12 +633,18 @@ func (f *FederationEnvoy) syncer() { func (f *FederationEnvoy) UpsertProofLeaf(_ context.Context, id Identifier, key LeafKey, leaf *Leaf) (*Proof, error) { + // If we're attempting to push an issuance proof, then we'll ensure + // that we track the sync attempt to ensure that we retry in the event + // of a failure. + logProofSync := id.ProofType == ProofTypeIssuance + pushReq := &FederationPushReq{ - ID: id, - Key: key, - Leaf: leaf, - resp: make(chan *Proof, 1), - err: make(chan error, 1), + ID: id, + Key: key, + Leaf: leaf, + LogProofSync: logProofSync, + resp: make(chan *Proof, 1), + err: make(chan error, 1), } if !fn.SendOrQuit(f.pushRequests, pushReq, f.Quit) { diff --git a/universe/interface.go b/universe/interface.go index 0e8ae8181..20273278b 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -400,6 +400,12 @@ type Item struct { // MetaReveal is the meta reveal associated with the given proof leaf. MetaReveal *proof.MetaReveal + + // LogProofSync is a boolean that indicates, if true, that the proof + // leaf sync attempt should be logged and actively managed to ensure + // that the federation push procedure is repeated in the event of a + // failure. 
+ LogProofSync bool } // BatchRegistrar is an interface that allows a caller to register a batch of From 9678ce7cd31ffdd7715f4c18ba93297e1c70c977 Mon Sep 17 00:00:00 2001 From: ffranr Date: Wed, 29 Nov 2023 21:36:59 +0000 Subject: [PATCH 22/54] universe+tapdb: add SQL statement to delete proof sync log entries --- tapdb/sqlc/querier.go | 1 + tapdb/sqlc/queries/universe.sql | 22 ++++++++++++++++++- tapdb/sqlc/universe.sql.go | 38 +++++++++++++++++++++++++++++++++ tapdb/universe_federation.go | 38 +++++++++++++++++++++++++++++++++ universe/interface.go | 4 ++++ 5 files changed, 102 insertions(+), 1 deletion(-) diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index c20633c10..3a9a6b27a 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -25,6 +25,7 @@ type Querier interface { DeleteAllNodes(ctx context.Context, namespace string) (int64, error) DeleteAssetWitnesses(ctx context.Context, assetID int64) error DeleteExpiredUTXOLeases(ctx context.Context, now sql.NullTime) error + DeleteFederationProofSyncLog(ctx context.Context, arg DeleteFederationProofSyncLogParams) error DeleteManagedUTXO(ctx context.Context, outpoint []byte) error DeleteNode(ctx context.Context, arg DeleteNodeParams) (int64, error) DeleteRoot(ctx context.Context, namespace string) (int64, error) diff --git a/tapdb/sqlc/queries/universe.sql b/tapdb/sqlc/queries/universe.sql index 3fd9a75b2..767c50f25 100644 --- a/tapdb/sqlc/queries/universe.sql +++ b/tapdb/sqlc/queries/universe.sql @@ -460,4 +460,24 @@ WHERE (log.sync_direction = sqlc.narg('sync_direction') OR sqlc.narg('leaf_minting_point_bytes') IS NULL) AND (leaf.script_key_bytes = sqlc.narg('leaf_script_key_bytes') - OR sqlc.narg('leaf_script_key_bytes') IS NULL); \ No newline at end of file + OR sqlc.narg('leaf_script_key_bytes') IS NULL); + +-- name: DeleteFederationProofSyncLog :exec +WITH selected_server_id AS ( + -- Select the server ids from the universe_servers table for the specified + -- hosts. + SELECT id + FROM universe_servers + WHERE + (server_host = sqlc.narg('server_host') + OR sqlc.narg('server_host') IS NULL) +) +DELETE FROM federation_proof_sync_log +WHERE + servers_id IN (SELECT id FROM selected_server_id) AND + (status = sqlc.narg('status') + OR sqlc.narg('status') IS NULL) AND + (timestamp >= sqlc.narg('min_timestamp') + OR sqlc.narg('min_timestamp') IS NULL) AND + (attempt_counter >= sqlc.narg('min_attempt_counter') + OR sqlc.narg('min_attempt_counter') IS NULL); \ No newline at end of file diff --git a/tapdb/sqlc/universe.sql.go b/tapdb/sqlc/universe.sql.go index 9c5c21339..b1e7b33ec 100644 --- a/tapdb/sqlc/universe.sql.go +++ b/tapdb/sqlc/universe.sql.go @@ -11,6 +11,44 @@ import ( "time" ) +const deleteFederationProofSyncLog = `-- name: DeleteFederationProofSyncLog :exec +WITH selected_server_id AS ( + -- Select the server ids from the universe_servers table for the specified + -- hosts. 
+ SELECT id + FROM universe_servers + WHERE + (server_host = $4 + OR $4 IS NULL) +) +DELETE FROM federation_proof_sync_log +WHERE + servers_id IN (SELECT id FROM selected_server_id) AND + (status = $1 + OR $1 IS NULL) AND + (timestamp >= $2 + OR $2 IS NULL) AND + (attempt_counter >= $3 + OR $3 IS NULL) +` + +type DeleteFederationProofSyncLogParams struct { + Status sql.NullString + MinTimestamp sql.NullTime + MinAttemptCounter sql.NullInt64 + ServerHost sql.NullString +} + +func (q *Queries) DeleteFederationProofSyncLog(ctx context.Context, arg DeleteFederationProofSyncLogParams) error { + _, err := q.db.ExecContext(ctx, deleteFederationProofSyncLog, + arg.Status, + arg.MinTimestamp, + arg.MinAttemptCounter, + arg.ServerHost, + ) + return err +} + const deleteUniverseEvents = `-- name: DeleteUniverseEvents :exec WITH root_id AS ( SELECT id diff --git a/tapdb/universe_federation.go b/tapdb/universe_federation.go index a7b5d49f3..c6bbea8a5 100644 --- a/tapdb/universe_federation.go +++ b/tapdb/universe_federation.go @@ -32,6 +32,9 @@ type ( // logs. QueryFedProofSyncLogParams = sqlc.QueryFederationProofSyncLogParams + // DeleteFedProofSyncLogParams is used to delete proof sync log entries. + DeleteFedProofSyncLogParams = sqlc.DeleteFederationProofSyncLogParams + // ProofSyncLogEntry is a single entry from the proof sync log. ProofSyncLogEntry = sqlc.QueryFederationProofSyncLogRow @@ -92,6 +95,10 @@ type FederationProofSyncLogStore interface { // given proof leaf. QueryFederationProofSyncLog(ctx context.Context, arg QueryFedProofSyncLogParams) ([]ProofSyncLogEntry, error) + + // DeleteFederationProofSyncLog deletes proof sync log entries. + DeleteFederationProofSyncLog(ctx context.Context, + arg DeleteFedProofSyncLogParams) error } // FederationSyncConfigStore is used to manage the set of Universe servers as @@ -565,6 +572,37 @@ func fetchProofSyncLogEntry(ctx context.Context, entry ProofSyncLogEntry, }, nil } +// DeleteProofsSyncLogEntries deletes a set of proof sync log entries. +func (u *UniverseFederationDB) DeleteProofsSyncLogEntries(ctx context.Context, + servers ...universe.ServerAddr) error { + + var writeTx UniverseFederationOptions + + err := u.db.ExecTx(ctx, &writeTx, func(db UniverseServerStore) error { + // Delete proof sync log entries which are associated with each + // server. + for i := range servers { + server := servers[i] + + err := db.DeleteFederationProofSyncLog( + ctx, DeleteFedProofSyncLogParams{ + ServerHost: sqlStr(server.HostStr()), + }, + ) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + return nil +} + // UpsertFederationSyncConfig upserts both the global and universe specific // federation sync configs. func (u *UniverseFederationDB) UpsertFederationSyncConfig( diff --git a/universe/interface.go b/universe/interface.go index 20273278b..4915241aa 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -865,6 +865,10 @@ type FederationProofSyncLog interface { // returns all log entries with sync status pending. FetchPendingProofsSyncLog(ctx context.Context, syncDirection *SyncDirection) ([]*ProofSyncLogEntry, error) + + // DeleteProofsSyncLogEntries deletes proof sync log entries. 
+ DeleteProofsSyncLogEntries(ctx context.Context, + servers ...ServerAddr) error } // FederationDB is used for CRUD operations related to federation logs and From ece2a7b81a6f274bfc46d9ce5dd77f5f66a5ab65 Mon Sep 17 00:00:00 2001 From: ffranr Date: Wed, 29 Nov 2023 21:37:44 +0000 Subject: [PATCH 23/54] rpcserver: when removing fed server also remove from proof sync log --- rpcserver.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/rpcserver.go b/rpcserver.go index 59f273c2f..992bd79d4 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -3997,7 +3997,17 @@ func (r *rpcServer) DeleteFederationServer(ctx context.Context, serversToDel := fn.Map(req.Servers, unmarshalUniverseServer) - err := r.cfg.FederationDB.RemoveServers(ctx, serversToDel...) + // Remove the servers from the proofs sync log. This is necessary before + // we can remove the servers from the database because of a foreign + // key constraint. + err := r.cfg.FederationDB.DeleteProofsSyncLogEntries( + ctx, serversToDel..., + ) + if err != nil { + return nil, err + } + + err = r.cfg.FederationDB.RemoveServers(ctx, serversToDel...) if err != nil { return nil, err } From 7815cac667f45630f4151e84a8eed4dfb1df569b Mon Sep 17 00:00:00 2001 From: ffranr Date: Wed, 29 Nov 2023 16:45:22 +0000 Subject: [PATCH 24/54] itest: add tapd harness config param for federation sync ticker interval --- itest/tapd_harness.go | 8 ++++++++ itest/test_harness.go | 5 +++++ 2 files changed, 13 insertions(+) diff --git a/itest/tapd_harness.go b/itest/tapd_harness.go index fe81f4ef2..1c500875a 100644 --- a/itest/tapd_harness.go +++ b/itest/tapd_harness.go @@ -100,6 +100,10 @@ type harnessOpts struct { proofCourier proof.CourierHarness custodianProofRetrievalDelay *time.Duration addrAssetSyncerDisable bool + + // fedSyncTickerInterval is the interval at which the federation envoy + // sync ticker will fire. + fedSyncTickerInterval *time.Duration } type harnessOption func(*harnessOpts) @@ -242,6 +246,10 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, finalCfg.CustodianProofRetrievalDelay = *opts.custodianProofRetrievalDelay } + if opts.fedSyncTickerInterval != nil { + finalCfg.Universe.SyncInterval = *opts.fedSyncTickerInterval + } + return &tapdHarness{ cfg: &cfg, clientCfg: finalCfg, diff --git a/itest/test_harness.go b/itest/test_harness.go index 1550f3a7b..2614bbe82 100644 --- a/itest/test_harness.go +++ b/itest/test_harness.go @@ -366,6 +366,10 @@ type tapdHarnessParams struct { // synced from the above node. startupSyncNumAssets int + // fedSyncTickerInterval is the interval at which the federation envoy + // sync ticker will fire. + fedSyncTickerInterval *time.Duration + // noDefaultUniverseSync indicates whether the default universe server // should be added as a federation server or not. 
	noDefaultUniverseSync bool
@@ -402,6 +406,7 @@ func setupTapdHarness(t *testing.T, ht *harnessTest,
 		ho.proofCourier = selectedProofCourier
 		ho.custodianProofRetrievalDelay = params.custodianProofRetrievalDelay
 		ho.addrAssetSyncerDisable = params.addrAssetSyncerDisable
+		ho.fedSyncTickerInterval = params.fedSyncTickerInterval
 	}
 
 	tapdHarness, err := newTapdHarness(t, ht, tapdConfig{

From 073539866fed6772cc5fa837b2e1c7bf3e40e5bf Mon Sep 17 00:00:00 2001
From: ffranr
Date: Wed, 29 Nov 2023 16:53:13 +0000
Subject: [PATCH 25/54] itest: test that the fed envoy reattempts mint proof
 push syncs

This commit adds an integration test which helps to ensure that a
minting node will retry pushing a minting proof to a federation server
peer node, in the event that the peer node failed to receive the proof
at the time of the initial sync attempt.
---
 itest/test_list_on_test.go        |  4 ++
 itest/universe_federation_test.go | 99 +++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)
 create mode 100644 itest/universe_federation_test.go

diff --git a/itest/test_list_on_test.go b/itest/test_list_on_test.go
index 9f0297038..9d4d6a582 100644
--- a/itest/test_list_on_test.go
+++ b/itest/test_list_on_test.go
@@ -195,6 +195,10 @@ var testCases = []*testCase{
 		name: "universe pagination simple",
 		test: testUniversePaginationSimple,
 	},
+	{
+		name: "mint proof repeat fed sync attempt",
+		test: testMintProofRepeatFedSyncAttempt,
+	},
 }
 
 var optionalTestCases = []*testCase{
diff --git a/itest/universe_federation_test.go b/itest/universe_federation_test.go
new file mode 100644
index 000000000..69d7dd712
--- /dev/null
+++ b/itest/universe_federation_test.go
@@ -0,0 +1,99 @@
+package itest
+
+import (
+	"context"
+	"time"
+
+	"github.com/lightninglabs/taproot-assets/taprpc/mintrpc"
+	unirpc "github.com/lightninglabs/taproot-assets/taprpc/universerpc"
+	"github.com/stretchr/testify/require"
+)
+
+// testMintProofRepeatFedSyncAttempt tests that the minting node will retry
+// pushing the minting proofs to the federation server peer node, if the peer
+// node is offline at the time of the initial sync attempt.
+func testMintProofRepeatFedSyncAttempt(t *harnessTest) {
+	// Create a new minting node, without hooking it up to any existing
+	// Universe server. We will also set the sync ticker to 4 seconds, so
+	// that we can test that the proof push sync is retried and eventually
+	// succeeds after the fed server peer node reappears online.
+	syncTickerInterval := 4 * time.Second
+	mintingNode := setupTapdHarness(
+		t.t, t, t.lndHarness.Bob, nil,
+		func(params *tapdHarnessParams) {
+			params.fedSyncTickerInterval = &syncTickerInterval
+			params.noDefaultUniverseSync = true
+		},
+	)
+	defer func() {
+		require.NoError(t.t, mintingNode.stop(!*noDelete))
+	}()
+
+	// We'll use the main node as our federation universe server
+	// counterparty.
+	fedServerNode := t.tapd
+
+	// Keep a reference to the fed server node RPC host address, so that we
+	// can assert that it has not changed after the restart. This is
+	// important, because the minting node will be retrying the proof push
+	// to this address.
+	fedServerNodeRpcHost := fedServerNode.rpcHost()
+
+	// Register the fedServerNode as a federation universe server with the
+	// minting node.
+	ctxb := context.Background()
+	ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
+	defer cancel()
+
+	_, err := mintingNode.AddFederationServer(
+		ctxt, &unirpc.AddFederationServerRequest{
+			Servers: []*unirpc.UniverseFederationServer{
+				{
+					Host: fedServerNodeRpcHost,
+				},
+			},
+		},
+	)
+	require.NoError(t.t, err)
+
+	// Assert that the fed server node has not seen any asset proofs.
+	AssertUniverseStats(t.t, fedServerNode, 0, 0, 0)
+
+	// Stop the federation server peer node, so that it does not receive the
+	// newly minted asset proofs immediately upon minting.
+	t.Logf("Stopping fed server tapd node")
+	require.NoError(t.t, fedServerNode.stop(false))
+
+	// Now that the federation peer node is inactive, we'll mint some
+	// assets.
+	t.Logf("Minting assets on minting node")
+	rpcAssets := MintAssetsConfirmBatch(
+		t.t, t.lndHarness.Miner.Client, mintingNode,
+		[]*mintrpc.MintAssetRequest{
+			simpleAssets[0], issuableAssets[0],
+		},
+	)
+	require.Len(t.t, rpcAssets, 2)
+
+	t.lndHarness.MineBlocks(7)
+
+	// Wait for the minting node to attempt (and fail) to push the minting
+	// proofs to the fed peer node. We wait some multiple of the sync ticker
+	// interval to ensure that the minting node has had time to retry the
+	// proof push sync.
+	time.Sleep(syncTickerInterval * 2)
+
+	// Start the federation server peer node. The federation envoy component
+	// of our minting node should currently be retrying the proof push sync
+	// with the federation peer at each tick.
+	t.Logf("Start (previously stopped) fed server tapd node")
+	err = fedServerNode.start(false)
+	require.NoError(t.t, err)
+
+	// Ensure that the federation server node RPC host address has not
+	// changed after the restart. If it has, then the minting node will be
+	// retrying the proof push to the wrong address.
+	require.Equal(t.t, fedServerNodeRpcHost, fedServerNode.rpcHost())
+
+	t.Logf("Assert that fed peer node has seen the asset minting proofs")
+	AssertUniverseStats(t.t, fedServerNode, 2, 2, 1)
+}

From 99f9920bd5311a79a6ecfaab74607f7902d1f263 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Fri, 1 Dec 2023 17:43:40 +0000
Subject: [PATCH 26/54] tapdb: add unit test multi db store handler

This structure will allow us to pass around the same db store handler
to different helper functions which will aid in setting up a unit test
db.
---
 tapdb/sqlutils_test.go | 77 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 tapdb/sqlutils_test.go

diff --git a/tapdb/sqlutils_test.go b/tapdb/sqlutils_test.go
new file mode 100644
index 000000000..492a479ee
--- /dev/null
+++ b/tapdb/sqlutils_test.go
@@ -0,0 +1,77 @@
+package tapdb
+
+import (
+	"database/sql"
+	"testing"
+	"time"
+
+	"github.com/lightninglabs/taproot-assets/tapdb/sqlc"
+	"github.com/lightningnetwork/lnd/clock"
+)
+
+// DbHandler is a helper struct that contains all the database stores.
+type DbHandler struct {
+	// UniverseFederationStore is a handle to the universe federation store.
+	UniverseFederationStore *UniverseFederationDB
+
+	// MultiverseStore is a handle to the multiverse store.
+	MultiverseStore *MultiverseStore
+
+	// AssetMintingStore is a handle to the pending (minting) assets store.
+	AssetMintingStore *AssetMintingStore
+
+	// AssetStore is a handle to the active assets store.
+	AssetStore *AssetStore
+
+	// DirectQuery is a handle to the underlying database that can be used
+	// to query the database directly.
+ DirectQuery sqlc.Querier +} + +// NewDbHandle creates a new store and query handle to the test database. +func NewDbHandle(t *testing.T) *DbHandler { + // Create a new test database. + db := NewTestDB(t) + + testClock := clock.NewTestClock(time.Now()) + + // Gain a handle to the pending (minting) universe federation store. + universeServerTxCreator := NewTransactionExecutor( + db, func(tx *sql.Tx) UniverseServerStore { + return db.WithTx(tx) + }, + ) + fedStore := NewUniverseFederationDB(universeServerTxCreator, testClock) + + // Gain a handle to the multiverse store. + multiverseTxCreator := NewTransactionExecutor(db, + func(tx *sql.Tx) BaseMultiverseStore { + return db.WithTx(tx) + }, + ) + multiverseStore := NewMultiverseStore(multiverseTxCreator) + + // Gain a handle to the pending (minting) assets store. + assetMintingDB := NewTransactionExecutor( + db, func(tx *sql.Tx) PendingAssetStore { + return db.WithTx(tx) + }, + ) + assetMintingStore := NewAssetMintingStore(assetMintingDB) + + // Gain a handle to the active assets store. + assetsDB := NewTransactionExecutor( + db, func(tx *sql.Tx) ActiveAssetsStore { + return db.WithTx(tx) + }, + ) + activeAssetsStore := NewAssetStore(assetsDB, testClock) + + return &DbHandler{ + UniverseFederationStore: fedStore, + MultiverseStore: multiverseStore, + AssetMintingStore: assetMintingStore, + AssetStore: activeAssetsStore, + DirectQuery: db, + } +} From d299bf50f4c934cd623b21750baa77e90863547d Mon Sep 17 00:00:00 2001 From: ffranr Date: Fri, 1 Dec 2023 17:45:36 +0000 Subject: [PATCH 27/54] tapdb: refactor randProof such that it optionally uses a provided asset --- tapdb/universe_test.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tapdb/universe_test.go b/tapdb/universe_test.go index c5803e02b..b82223e40 100644 --- a/tapdb/universe_test.go +++ b/tapdb/universe_test.go @@ -129,7 +129,12 @@ func randLeafKey(t *testing.T) universe.LeafKey { } } -func randProof(t *testing.T) *proof.Proof { +func randProof(t *testing.T, argAsset *asset.Asset) *proof.Proof { + proofAsset := *asset.RandAsset(t, asset.Normal) + if argAsset != nil { + proofAsset = *argAsset + } + return &proof.Proof{ PrevOut: wire.OutPoint{}, BlockHeader: wire.BlockHeader{ @@ -142,7 +147,7 @@ func randProof(t *testing.T) *proof.Proof { }}, }, TxMerkleProof: proof.TxMerkleProof{}, - Asset: *asset.RandAsset(t, asset.Normal), + Asset: proofAsset, InclusionProof: proof.TaprootProof{ InternalKey: test.RandPubKey(t), }, @@ -152,7 +157,7 @@ func randProof(t *testing.T) *proof.Proof { func randMintingLeaf(t *testing.T, assetGen asset.Genesis, groupKey *btcec.PublicKey) universe.Leaf { - randProof := randProof(t) + randProof := randProof(t, nil) leaf := universe.Leaf{ GenesisWithGroup: universe.GenesisWithGroup{ @@ -320,7 +325,7 @@ func TestUniverseIssuanceProofs(t *testing.T) { testLeaf := &testLeaves[idx] var proofBuf bytes.Buffer - randProof := randProof(t) + randProof := randProof(t, nil) require.NoError(t, randProof.Encode(&proofBuf)) testLeaf.Leaf.RawProof = proofBuf.Bytes() From 1efd41cf46fa0465984eef1d48d76ba385c79264 Mon Sep 17 00:00:00 2001 From: ffranr Date: Fri, 1 Dec 2023 17:58:11 +0000 Subject: [PATCH 28/54] tapdb: extract unit test func for populating asset and proof in db This commit adds a unit test db handler helper method which we will use in a new test (in a subsequent commit) to populate a unit test db with an asset and its corresponding proof. 
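As a rough usage sketch (drawn from the test refactor in the diff
below, not a drop-in snippet), a test that previously had to build all
of this state by hand can now populate the database in two calls:

    dbHandle := NewDbHandle(t)
    assetStore := dbHandle.AssetStore

    // Add a random asset and its corresponding proof into the database.
    testAsset, testProof := dbHandle.AddRandomAssetProof(t)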
--- tapdb/assets_store_test.go | 108 ++++-------------------------- tapdb/sqlutils_test.go | 130 +++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+), 97 deletions(-) diff --git a/tapdb/assets_store_test.go b/tapdb/assets_store_test.go index 85a914033..82c607300 100644 --- a/tapdb/assets_store_test.go +++ b/tapdb/assets_store_test.go @@ -246,105 +246,17 @@ func assertAssetEqual(t *testing.T, a, b *asset.Asset) { func TestImportAssetProof(t *testing.T) { t.Parallel() - // First, we'll create a new instance of the database. - _, assetStore, db := newAssetStore(t) - - // Next, we'll make a new random asset that also has a few inputs with - // dummy witness information. - testAsset := randAsset(t) - - assetRoot, err := commitment.NewAssetCommitment(testAsset) - require.NoError(t, err) - - taprootAssetRoot, err := commitment.NewTapCommitment(assetRoot) - require.NoError(t, err) - - // With our asset created, we can now create the AnnotatedProof we use - // to import assets into the database. - var blockHash chainhash.Hash - _, err = rand.Read(blockHash[:]) - require.NoError(t, err) + var ( + ctxb = context.Background() - anchorTx := wire.NewMsgTx(2) - anchorTx.AddTxIn(&wire.TxIn{}) - anchorTx.AddTxOut(&wire.TxOut{ - PkScript: bytes.Repeat([]byte{0x01}, 34), - Value: 10, - }) + dbHandle = NewDbHandle(t) + assetStore = dbHandle.AssetStore + ) + // Add a random asset and corresponding proof into the database. + testAsset, testProof := dbHandle.AddRandomAssetProof(t) assetID := testAsset.ID() - anchorPoint := wire.OutPoint{ - Hash: anchorTx.TxHash(), - Index: 0, - } - initialBlob := bytes.Repeat([]byte{0x0}, 100) - updatedBlob := bytes.Repeat([]byte{0x77}, 100) - testProof := &proof.AnnotatedProof{ - Locator: proof.Locator{ - AssetID: &assetID, - ScriptKey: *testAsset.ScriptKey.PubKey, - }, - Blob: initialBlob, - AssetSnapshot: &proof.AssetSnapshot{ - Asset: testAsset, - OutPoint: anchorPoint, - AnchorBlockHash: blockHash, - AnchorBlockHeight: test.RandInt[uint32](), - AnchorTxIndex: test.RandInt[uint32](), - AnchorTx: anchorTx, - OutputIndex: 0, - InternalKey: test.RandPubKey(t), - ScriptRoot: taprootAssetRoot, - }, - } - if testAsset.GroupKey != nil { - testProof.GroupKey = &testAsset.GroupKey.GroupPubKey - } - - // We'll now insert the internal key information as well as the script - // key ahead of time to reflect the address creation that happens - // elsewhere. - ctxb := context.Background() - _, err = db.UpsertInternalKey(ctxb, InternalKey{ - RawKey: testProof.InternalKey.SerializeCompressed(), - KeyFamily: test.RandInt[int32](), - KeyIndex: test.RandInt[int32](), - }) - require.NoError(t, err) - rawScriptKeyID, err := db.UpsertInternalKey(ctxb, InternalKey{ - RawKey: testAsset.ScriptKey.RawKey.PubKey.SerializeCompressed(), - KeyFamily: int32(testAsset.ScriptKey.RawKey.Family), - KeyIndex: int32(testAsset.ScriptKey.RawKey.Index), - }) - require.NoError(t, err) - _, err = db.UpsertScriptKey(ctxb, NewScriptKey{ - InternalKeyID: rawScriptKeyID, - TweakedScriptKey: testAsset.ScriptKey.PubKey.SerializeCompressed(), - Tweak: nil, - }) - require.NoError(t, err) - - // We'll add the chain transaction of the proof now to simulate a - // batched transfer on a higher layer. 
- var anchorTxBuf bytes.Buffer - err = testProof.AnchorTx.Serialize(&anchorTxBuf) - require.NoError(t, err) - anchorTXID := testProof.AnchorTx.TxHash() - _, err = db.UpsertChainTx(ctxb, ChainTxParams{ - Txid: anchorTXID[:], - RawTx: anchorTxBuf.Bytes(), - BlockHeight: sqlInt32(testProof.AnchorBlockHeight), - BlockHash: testProof.AnchorBlockHash[:], - TxIndex: sqlInt32(testProof.AnchorTxIndex), - }) - require.NoError(t, err, "unable to insert chain tx: %w", err) - - // With all our test data constructed, we'll now attempt to import the - // asset into the database. - require.NoError(t, assetStore.ImportProofs( - ctxb, proof.MockHeaderVerifier, proof.MockGroupVerifier, false, - testProof, - )) + initialBlob := testProof.Blob // We should now be able to retrieve the set of all assets inserted on // disk. @@ -371,7 +283,7 @@ func TestImportAssetProof(t *testing.T) { ScriptKey: *testAsset.ScriptKey.PubKey, }) require.NoError(t, err) - require.Equal(t, initialBlob, []byte(currentBlob)) + require.Equal(t, initialBlob, currentBlob) // We should also be able to fetch the created asset above based on // either the asset ID, or key group via the main coin selection @@ -391,6 +303,8 @@ func TestImportAssetProof(t *testing.T) { // We'll now attempt to overwrite the proof with one that has different // block information (simulating a re-org). + updatedBlob := bytes.Repeat([]byte{0x77}, 100) + testProof.AnchorBlockHash = chainhash.Hash{12, 34, 56} testProof.AnchorBlockHeight = 1234 testProof.AnchorTxIndex = 5678 diff --git a/tapdb/sqlutils_test.go b/tapdb/sqlutils_test.go index 492a479ee..5c3114ea4 100644 --- a/tapdb/sqlutils_test.go +++ b/tapdb/sqlutils_test.go @@ -1,12 +1,22 @@ package tapdb import ( + "bytes" + "context" "database/sql" + "math/rand" "testing" "time" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/commitment" + "github.com/lightninglabs/taproot-assets/internal/test" + "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightningnetwork/lnd/clock" + "github.com/stretchr/testify/require" ) // DbHandler is a helper struct that contains all the database stores. @@ -28,6 +38,126 @@ type DbHandler struct { DirectQuery sqlc.Querier } +// AddRandomAssetProof generates a random asset and corresponding proof and +// inserts them into the given test database. +func (d *DbHandler) AddRandomAssetProof(t *testing.T) (*asset.Asset, + *proof.AnnotatedProof) { + + var ( + ctx = context.Background() + + assetStore = d.AssetStore + db = d.DirectQuery + ) + + // Next, we'll make a new random asset that also has a few inputs with + // dummy witness information. + testAsset := randAsset(t) + + assetRoot, err := commitment.NewAssetCommitment(testAsset) + require.NoError(t, err) + + taprootAssetRoot, err := commitment.NewTapCommitment(assetRoot) + require.NoError(t, err) + + // With our asset created, we can now create the AnnotatedProof we use + // to import assets into the database. + var blockHash chainhash.Hash + _, err = rand.Read(blockHash[:]) + require.NoError(t, err) + + anchorTx := wire.NewMsgTx(2) + anchorTx.AddTxIn(&wire.TxIn{}) + anchorTx.AddTxOut(&wire.TxOut{ + PkScript: bytes.Repeat([]byte{0x01}, 34), + Value: 10, + }) + + assetID := testAsset.ID() + anchorPoint := wire.OutPoint{ + Hash: anchorTx.TxHash(), + Index: 0, + } + + // Generate a random proof and encode it into a proof blob. 
+ testProof := randProof(t, testAsset) + + var proofBlobBuffer bytes.Buffer + err = testProof.Encode(&proofBlobBuffer) + require.NoError(t, err) + + proofBlob := proofBlobBuffer.Bytes() + scriptKey := testAsset.ScriptKey + + annotatedProof := &proof.AnnotatedProof{ + Locator: proof.Locator{ + AssetID: &assetID, + ScriptKey: *scriptKey.PubKey, + }, + Blob: proofBlob, + AssetSnapshot: &proof.AssetSnapshot{ + Asset: testAsset, + OutPoint: anchorPoint, + AnchorBlockHash: blockHash, + AnchorBlockHeight: test.RandInt[uint32](), + AnchorTxIndex: test.RandInt[uint32](), + AnchorTx: anchorTx, + OutputIndex: 0, + InternalKey: test.RandPubKey(t), + ScriptRoot: taprootAssetRoot, + }, + } + if testAsset.GroupKey != nil { + annotatedProof.GroupKey = &testAsset.GroupKey.GroupPubKey + } + + // We'll now insert the internal key information as well as the script + // key ahead of time to reflect the address creation that happens + // elsewhere. + _, err = db.UpsertInternalKey(ctx, InternalKey{ + RawKey: annotatedProof.InternalKey.SerializeCompressed(), + KeyFamily: test.RandInt[int32](), + KeyIndex: test.RandInt[int32](), + }) + require.NoError(t, err) + rawScriptKeyID, err := db.UpsertInternalKey(ctx, InternalKey{ + RawKey: scriptKey.RawKey.PubKey.SerializeCompressed(), + KeyFamily: int32(testAsset.ScriptKey.RawKey.Family), + KeyIndex: int32(testAsset.ScriptKey.RawKey.Index), + }) + require.NoError(t, err) + _, err = db.UpsertScriptKey(ctx, NewScriptKey{ + InternalKeyID: rawScriptKeyID, + TweakedScriptKey: scriptKey.PubKey.SerializeCompressed(), + Tweak: nil, + }) + require.NoError(t, err) + + // We'll add the chain transaction of the proof now to simulate a + // batched transfer on a higher layer. + var anchorTxBuf bytes.Buffer + err = annotatedProof.AnchorTx.Serialize(&anchorTxBuf) + require.NoError(t, err) + anchorTXID := annotatedProof.AnchorTx.TxHash() + _, err = db.UpsertChainTx(ctx, ChainTxParams{ + Txid: anchorTXID[:], + RawTx: anchorTxBuf.Bytes(), + BlockHeight: sqlInt32(annotatedProof.AnchorBlockHeight), + BlockHash: annotatedProof.AnchorBlockHash[:], + TxIndex: sqlInt32(annotatedProof.AnchorTxIndex), + }) + require.NoError(t, err, "unable to insert chain tx: %w", err) + + // With all our test data constructed, we'll now attempt to import the + // asset into the database. + require.NoError(t, assetStore.ImportProofs( + ctx, proof.MockHeaderVerifier, proof.MockGroupVerifier, false, + annotatedProof, + )) + + return testAsset, annotatedProof +} + // NewDbHandle creates a new store and query handle to the test database. func NewDbHandle(t *testing.T) *DbHandler { // Create a new test database. From 503fac73445231b3501934b65f3079ab902c4d6d Mon Sep 17 00:00:00 2001 From: ffranr Date: Fri, 1 Dec 2023 18:21:37 +0000 Subject: [PATCH 29/54] tapdb: add test db helper func for server addresses and proof leaves This commit adds a helper method for inserting server addresses into a unit test db. It also adds a helper method for inserting a proof leaf into a unit test db. These helper methods will be used in a subsequent commit. 
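For illustration, a test using these helpers together with the handler
from the prior commits would look roughly like this (a sketch; the
actual call sites land in a subsequent commit):

    dbHandle := NewDbHandle(t)

    // Populate the database with a random asset, its proof, and a
    // corresponding universe proof leaf.
    testAsset, testProof := dbHandle.AddRandomAssetProof(t)
    uniProof := dbHandle.AddUniProofLeaf(t, testAsset, testProof)

    // Insert a set of federation server addresses.
    servers := dbHandle.AddRandomServerAddrs(t, 3)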
--- tapdb/sqlutils_test.go | 63 +++++++++++++++++++++++++++++++ tapdb/universe_federation_test.go | 23 +++-------- universe/interface.go | 20 ++++++++++ 3 files changed, 89 insertions(+), 17 deletions(-) diff --git a/tapdb/sqlutils_test.go b/tapdb/sqlutils_test.go index 5c3114ea4..6f0786fb3 100644 --- a/tapdb/sqlutils_test.go +++ b/tapdb/sqlutils_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "database/sql" + "fmt" "math/rand" "testing" "time" @@ -15,6 +16,7 @@ import ( "github.com/lightninglabs/taproot-assets/internal/test" "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb/sqlc" + "github.com/lightninglabs/taproot-assets/universe" "github.com/lightningnetwork/lnd/clock" "github.com/stretchr/testify/require" ) @@ -158,6 +160,67 @@ func (d *DbHandler) AddRandomAssetProof(t *testing.T) (*asset.Asset, return testAsset, annotatedProof } +// AddUniProofLeaf generates a universe proof leaf and inserts it into the test +// database. +func (d *DbHandler) AddUniProofLeaf(t *testing.T, testAsset *asset.Asset, + annotatedProof *proof.AnnotatedProof) *universe.Proof { + + ctx := context.Background() + + // Insert proof into the multiverse/universe store. This step will + // populate the universe root and universe leaves tables. + uniId := universe.NewUniIDFromAsset(*testAsset) + + leafKey := universe.LeafKey{ + OutPoint: annotatedProof.AssetSnapshot.OutPoint, + ScriptKey: &testAsset.ScriptKey, + } + + leaf := universe.Leaf{ + GenesisWithGroup: universe.GenesisWithGroup{ + Genesis: testAsset.Genesis, + GroupKey: testAsset.GroupKey, + }, + RawProof: annotatedProof.Blob, + Asset: testAsset, + Amt: testAsset.Amount, + } + + uniProof, err := d.MultiverseStore.UpsertProofLeaf( + ctx, uniId, leafKey, &leaf, nil, + ) + require.NoError(t, err) + + return uniProof +} + +// AddRandomServerAddrs is a helper function that will create server addresses +// and add them to the database. +func (d *DbHandler) AddRandomServerAddrs(t *testing.T, + numServers int) []universe.ServerAddr { + + var ( + ctx = context.Background() + fedDB = d.UniverseFederationStore + ) + + addrs := make([]universe.ServerAddr, 0, numServers) + for i := 0; i < numServers; i++ { + portOffset := i + 10_000 + hostStr := fmt.Sprintf("localhost:%v", portOffset) + + addr := universe.NewServerAddr(int64(i+1), hostStr) + addrs = append(addrs, addr) + } + + // With the set of addrs created, we'll now insert them all into the + // database. + err := fedDB.AddServers(ctx, addrs...) + require.NoError(t, err) + + return addrs +} + // NewDbHandle creates a new store and query handle to the test database. func NewDbHandle(t *testing.T) *DbHandler { // Create a new test database. diff --git a/tapdb/universe_federation_test.go b/tapdb/universe_federation_test.go index b7fb80a21..3ffbf80a0 100644 --- a/tapdb/universe_federation_test.go +++ b/tapdb/universe_federation_test.go @@ -3,7 +3,6 @@ package tapdb import ( "context" "database/sql" - "fmt" "testing" "time" @@ -35,10 +34,12 @@ func newTestFederationDb(t *testing.T, func TestUniverseFederationCRUD(t *testing.T) { t.Parallel() - testClock := clock.NewTestClock(time.Now()) - fedDB, _ := newTestFederationDb(t, testClock) + var ( + ctx = context.Background() - ctx := context.Background() + db = NewDbHandle(t) + fedDB = db.UniverseFederationStore + ) // If we try to list the set of servers without any added, we should // get the error we expect. 
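(Editorial note between hunks, not part of the patch: the AddUniProofLeaf
helper above keys everything off universe.NewUniIDFromAsset, which this
patch adds to universe/interface.go further down. A minimal sketch of the
mapping it performs, based on the definitions shown there; testAsset and
uniID are illustrative names and the snippet assumes it sits inside a Go
test:)

	uniID := universe.NewUniIDFromAsset(*testAsset)

	// Issuance proofs for genesis assets, transfer proofs otherwise.
	if testAsset.IsGenesisAsset() {
		require.Equal(t, universe.ProofTypeIssuance, uniID.ProofType)
	} else {
		require.Equal(t, universe.ProofTypeTransfer, uniID.ProofType)
	}

	// A group key, when present, takes precedence over the asset ID.
	if testAsset.GroupKey != nil {
		require.Equal(
			t, &testAsset.GroupKey.GroupPubKey, uniID.GroupKey,
		)
	} else {
		require.Equal(t, testAsset.ID(), uniID.AssetID)
	}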
@@ -47,19 +48,7 @@ func TestUniverseFederationCRUD(t *testing.T) { require.Empty(t, dbServers) // Next, we'll try to add a new series of servers to the DB. - const numServers = 10 - addrs := make([]universe.ServerAddr, 0, numServers) - for i := int64(0); i < numServers; i++ { - portOffset := i + 10_000 - hostStr := fmt.Sprintf("localhost:%v", portOffset) - - addrs = append(addrs, universe.NewServerAddr(i+1, hostStr)) - } - - // With the set of addrs created, we'll now insert them all into the - // database. - err = fedDB.AddServers(ctx, addrs...) - require.NoError(t, err) + addrs := db.AddRandomServerAddrs(t, 10) // If we try to insert them all again, then we should get an error as // we ensure the host names are unique. diff --git a/universe/interface.go b/universe/interface.go index 4915241aa..78b8ae60d 100644 --- a/universe/interface.go +++ b/universe/interface.go @@ -84,6 +84,26 @@ func (i *Identifier) StringForLog() string { i.String(), i.AssetID[:], groupKey, i.ProofType) } +// NewUniIDFromAsset creates a new universe ID from an asset. +func NewUniIDFromAsset(a asset.Asset) Identifier { + proofType := ProofTypeTransfer + if a.IsGenesisAsset() { + proofType = ProofTypeIssuance + } + + if a.GroupKey != nil { + return Identifier{ + GroupKey: &a.GroupKey.GroupPubKey, + ProofType: proofType, + } + } + + return Identifier{ + AssetID: a.ID(), + ProofType: proofType, + } +} + // NewUniIDFromRawArgs creates a new universe ID from the raw arguments. The // asset ID bytes and group key bytes are mutually exclusive. If the group key // bytes are set, then the asset ID bytes will be ignored. From 89d212755e1f31b165cc1c0aa79657c034eecc69 Mon Sep 17 00:00:00 2001 From: ffranr Date: Fri, 1 Dec 2023 18:22:18 +0000 Subject: [PATCH 30/54] tapdb: add proof sync log db queries unit test --- tapdb/universe_federation_test.go | 278 ++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) diff --git a/tapdb/universe_federation_test.go b/tapdb/universe_federation_test.go index 3ffbf80a0..7492a2c61 100644 --- a/tapdb/universe_federation_test.go +++ b/tapdb/universe_federation_test.go @@ -87,6 +87,284 @@ func TestUniverseFederationCRUD(t *testing.T) { require.NoError(t, err) } +// TestFederationProofSyncLogCRUD tests that we can add, modify, and remove +// proof sync log entries from the Universe DB. +func TestFederationProofSyncLogCRUD(t *testing.T) { + t.Parallel() + + var ( + ctx = context.Background() + dbHandle = NewDbHandle(t) + fedStore = dbHandle.UniverseFederationStore + ) + + // Populate the database with a random asset, its associated proof, and + // a set of servers. + testAsset, testAnnotatedProof := dbHandle.AddRandomAssetProof(t) + uniProof := dbHandle.AddUniProofLeaf(t, testAsset, testAnnotatedProof) + uniId := universe.NewUniIDFromAsset(*testAsset) + + servers := dbHandle.AddRandomServerAddrs(t, 3) + + // Designate pending sync status for all servers except the first. + // Make a map set of pending sync servers. + pendingSyncServers := make(map[universe.ServerAddr]struct{}) + for i := range servers { + server := servers[i] + if i == 0 { + continue + } + pendingSyncServers[server] = struct{}{} + } + + // Add log entries for the first server. + syncServer := servers[0] + + // Add push log entry. + _, err := fedStore.UpsertFederationProofSyncLog( + ctx, uniId, uniProof.LeafKey, syncServer, + universe.SyncDirectionPush, universe.ProofSyncStatusComplete, + true, + ) + require.NoError(t, err) + + // Add pull log entry. 
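+	// The sync direction is part of what identifies a log entry, so this
+	// pull entry coexists with the push entry inserted above for the same
+	// server, universe and leaf key.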
+	_, err = fedStore.UpsertFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, syncServer,
+		universe.SyncDirectionPull, universe.ProofSyncStatusComplete,
+		true,
+	)
+	require.NoError(t, err)
+
+	// We've already added log entries for the first server. We will now
+	// insert new proof sync log entries for the remaining servers.
+	for _, server := range servers[1:] {
+		_, err := fedStore.UpsertFederationProofSyncLog(
+			ctx, uniId, uniProof.LeafKey, server,
+			universe.SyncDirectionPush,
+			universe.ProofSyncStatusPending, false,
+		)
+		require.NoError(t, err)
+	}
+
+	// Retrieve all sync status pending log entries.
+	syncDirectionPush := universe.SyncDirectionPush
+	pendingLogEntries, err := fedStore.FetchPendingProofsSyncLog(
+		ctx, &syncDirectionPush,
+	)
+	require.NoError(t, err)
+	require.Len(t, pendingLogEntries, 2)
+
+	for i := range pendingLogEntries {
+		entry := pendingLogEntries[i]
+		require.Equal(
+			t, universe.ProofSyncStatusPending, entry.SyncStatus,
+		)
+		require.Equal(
+			t, universe.SyncDirectionPush, entry.SyncDirection,
+		)
+		require.Equal(t, uniId.String(), entry.UniID.String())
+		require.Equal(t, int64(0), entry.AttemptCounter)
+
+		assertProofSyncLogLeafKey(t, uniProof.LeafKey, entry.LeafKey)
+		assertProofSyncLogLeaf(t, *uniProof.Leaf, entry.Leaf)
+
+		// Check for server address in pending sync server set.
+		_, ok := pendingSyncServers[entry.ServerAddr]
+		require.True(t, ok)
+	}
+
+	// Retrieve all push sync status complete log entries.
+	completePushLogEntries, err := fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusComplete,
+	)
+	require.NoError(t, err)
+
+	// There should only be one complete push log entry.
+	require.Len(t, completePushLogEntries, 1)
+
+	// Check that the complete log entry is as expected.
+	completePushEntry := completePushLogEntries[0]
+
+	require.Equal(t, servers[0], completePushEntry.ServerAddr)
+	require.Equal(
+		t, universe.ProofSyncStatusComplete,
+		completePushEntry.SyncStatus,
+	)
+	require.Equal(
+		t, universe.SyncDirectionPush, completePushEntry.SyncDirection,
+	)
+	require.Equal(t, uniId.String(), completePushEntry.UniID.String())
+	require.Equal(t, int64(0), completePushEntry.AttemptCounter)
+
+	assertProofSyncLogLeafKey(
+		t, uniProof.LeafKey, completePushEntry.LeafKey,
+	)
+	assertProofSyncLogLeaf(t, *uniProof.Leaf, completePushEntry.Leaf)
+
+	// Retrieve all pull sync status complete log entries.
+	completePullLogEntries, err := fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPull,
+		universe.ProofSyncStatusComplete,
+	)
+	require.NoError(t, err)
+
+	// There should only be one complete pull log entry.
+	require.Len(t, completePullLogEntries, 1)
+
+	// Check that the complete log entry is as expected.
+	completePullEntry := completePullLogEntries[0]
+
+	require.Equal(t, servers[0], completePullEntry.ServerAddr)
+	require.Equal(
+		t, universe.ProofSyncStatusComplete,
+		completePullEntry.SyncStatus,
+	)
+	require.Equal(
+		t, universe.SyncDirectionPull, completePullEntry.SyncDirection,
+	)
+	require.Equal(t, uniId.String(), completePullEntry.UniID.String())
+	require.Equal(t, int64(0), completePullEntry.AttemptCounter)
+
+	assertProofSyncLogLeafKey(
+		t, uniProof.LeafKey, completePullEntry.LeafKey,
+	)
+	assertProofSyncLogLeaf(t, *uniProof.Leaf, completePullEntry.Leaf)
+
+	// Increment the attempt counter for one of the pending log entries.
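+	// The final boolean argument to UpsertFederationProofSyncLog controls
+	// whether the attempt counter is bumped as part of the upsert; we pass
+	// true here and verify the new counter value below.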
+	_, err = fedStore.UpsertFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, servers[1],
+		universe.SyncDirectionPush, universe.ProofSyncStatusPending,
+		true,
+	)
+	require.NoError(t, err)
+
+	// Check that the attempt counter was incremented as expected.
+	pendingLogEntries, err = fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusPending,
+	)
+	require.NoError(t, err)
+	require.Len(t, pendingLogEntries, 2)
+
+	for i := range pendingLogEntries {
+		entry := pendingLogEntries[i]
+		if entry.ServerAddr == servers[1] {
+			require.Equal(t, int64(1), entry.AttemptCounter)
+		} else {
+			require.Equal(t, int64(0), entry.AttemptCounter)
+		}
+	}
+
+	// Upsert without incrementing the attempt counter for one of the
+	// pending log entries.
+	_, err = fedStore.UpsertFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, servers[1],
+		universe.SyncDirectionPush, universe.ProofSyncStatusPending,
+		false,
+	)
+	require.NoError(t, err)
+
+	// Check that the attempt counter remained unchanged this time.
+	pendingLogEntries, err = fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusPending,
+	)
+	require.NoError(t, err)
+	require.Len(t, pendingLogEntries, 2)
+
+	for i := range pendingLogEntries {
+		entry := pendingLogEntries[i]
+		if entry.ServerAddr == servers[1] {
+			require.Equal(t, int64(1), entry.AttemptCounter)
+		} else {
+			require.Equal(t, int64(0), entry.AttemptCounter)
+		}
+	}
+
+	// Set the sync status to complete for one of the pending log entries.
+	_, err = fedStore.UpsertFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, servers[1],
+		universe.SyncDirectionPush, universe.ProofSyncStatusComplete,
+		false,
+	)
+	require.NoError(t, err)
+
+	// Check that the sync status was updated as expected.
+	pendingLogEntries, err = fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusPending,
+	)
+	require.NoError(t, err)
+	require.Len(t, pendingLogEntries, 1)
+
+	completePushLogEntries, err = fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusComplete,
+	)
+	require.NoError(t, err)
+	require.Len(t, completePushLogEntries, 2)
+
+	// Delete the log entries for the first two servers.
+	err = fedStore.DeleteProofsSyncLogEntries(ctx, servers[0], servers[1])
+	require.NoError(t, err)
+
+	// Only one log entry should remain, and it should have sync status
+	// pending.
+	pendingLogEntries, err = fedStore.QueryFederationProofSyncLog(
+		ctx, uniId, uniProof.LeafKey, universe.SyncDirectionPush,
+		universe.ProofSyncStatusPending,
+	)
+	require.NoError(t, err)
+	require.Len(t, pendingLogEntries, 1)
+
+	// Check that the remaining log entry is as expected.
+	pendingEntry := pendingLogEntries[0]
+	require.Equal(t, servers[2], pendingEntry.ServerAddr)
+}
+
+// assertProofSyncLogLeafKey asserts that a leaf key derived from a proof sync
+// log entry is equal to a given leaf key.
+func assertProofSyncLogLeafKey(t *testing.T, actualLeafKey universe.LeafKey,
+	logLeafKey universe.LeafKey) {
+
+	// We can safely ignore the tweaked script key as it is the derivation
+	// information for the script key. It is only ever known to the owner
+	// of the asset and is never serialized in a proof.
+	actualLeafKey.ScriptKey.TweakedScriptKey = nil
+	require.Equal(t, actualLeafKey, logLeafKey)
+}
+
+// assertProofSyncLogLeaf asserts that a leaf derived from a proof sync log
+// entry is equal to a given universe leaf.
+func assertProofSyncLogLeaf(t *testing.T, actualLeaf universe.Leaf,
+	logLeaf universe.Leaf) {
+
+	if actualLeaf.GenesisWithGroup.GroupKey != nil {
+		// We can safely ignore the group key witness as it is
+		// basically just extracted from the asset and won't be
+		// relevant when parsing the proof.
+		actualLeaf.GenesisWithGroup.GroupKey.Witness = nil
+
+		// We can safely ignore the pre-tweaked group key
+		// (GroupKey.RawKey) as it is the derivation information for the
+		// group key. It is only ever known to the owner of the asset
+		// and is never serialized in a proof.
+		actualLeaf.GenesisWithGroup.GroupKey.RawKey.PubKey = nil
+	}
+
+	require.Equal(t, actualLeaf.Amt, logLeaf.Amt)
+	require.Equal(t, actualLeaf.RawProof, logLeaf.RawProof)
+	require.Equal(t, actualLeaf.GenesisWithGroup, logLeaf.GenesisWithGroup)
+
+	// We compare the assets with our custom asset equality function, as
+	// the SplitCommitmentRoot field MS-SMT node types will differ. A
+	// computed node is derived from the database data whereas the
+	// generated asset may have an MS-SMT branch node type.
+	require.True(t, actualLeaf.Asset.DeepEqual(logLeaf.Asset))
+}
+
 // TestFederationConfigDefault tests that we're able to fetch the default
 // federation config.
 func TestFederationConfigDefault(t *testing.T) {

From dab84f18b906b8233f10dc2dd3612143158a45e5 Mon Sep 17 00:00:00 2001
From: ffranr
Date: Wed, 6 Dec 2023 14:27:17 +0000
Subject: [PATCH 31/54] tapdb: log path to sqlite test db

---
 tapdb/sqlite.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tapdb/sqlite.go b/tapdb/sqlite.go
index 500980fcb..2a3b2d064 100644
--- a/tapdb/sqlite.go
+++ b/tapdb/sqlite.go
@@ -160,11 +160,11 @@ func NewSqliteStore(cfg *SqliteConfig) (*SqliteStore, error) {
 func NewTestSqliteDB(t *testing.T) *SqliteStore {
 	t.Helper()
 
-	t.Logf("Creating new SQLite DB for testing")
+	dbFileName := filepath.Join(t.TempDir(), "tmp.db")
+	t.Logf("Creating new SQLite DB for testing: %s", dbFileName)
 
 	// TODO(roasbeef): if we pass :memory: for the file name, then we get
 	// an in mem version to speed up tests
-	dbFileName := filepath.Join(t.TempDir(), "tmp.db")
 	sqlDB, err := NewSqliteStore(&SqliteConfig{
 		DatabaseFileName: dbFileName,
 		SkipMigrations:   false,

From 446794ae062ea2bc58d515d13237eb291e6f177a Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 11 Dec 2023 15:18:47 +0100
Subject: [PATCH 32/54] tapcfg+tapgarden: remove conditions around proof
 courier

Now that we have enabled a proof courier for all integration tests, we
don't need to conditionally load the config anymore.
---
 tapcfg/server.go       | 21 +++++++++------------
 tapgarden/custodian.go |  7 -------
 2 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/tapcfg/server.go b/tapcfg/server.go
index 37786b96b..7e6aca378 100644
--- a/tapcfg/server.go
+++ b/tapcfg/server.go
@@ -231,18 +231,6 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 		}
 	}
 
-	// TODO(ffranr): This logic is leftover for integration tests which
-	// do not yet enable a proof courier. Remove once all integration tests
-	// support a proof courier.
- var proofCourierCfg *proof.CourierCfg - if cfg.HashMailCourier != nil { - proofCourierCfg = &proof.CourierCfg{ - ReceiverAckTimeout: cfg.HashMailCourier.ReceiverAckTimeout, - BackoffCfg: cfg.HashMailCourier.BackoffCfg, - TransferLog: assetStore, - } - } - reOrgWatcher := tapgarden.NewReOrgWatcher(&tapgarden.ReOrgWatcherConfig{ ChainBridge: chainBridge, GroupVerifier: tapgarden.GenGroupVerifier( @@ -330,6 +318,15 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, ChainParams: &tapChainParams, }) + // Addresses can have different proof couriers configured, but both + // types of couriers that currently exist will receive this config upon + // initialization. + proofCourierCfg := &proof.CourierCfg{ + ReceiverAckTimeout: cfg.HashMailCourier.ReceiverAckTimeout, + BackoffCfg: cfg.HashMailCourier.BackoffCfg, + TransferLog: assetStore, + } + return &tap.Config{ DebugLevel: cfg.DebugLevel, RuntimeID: runtimeID, diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go index 024c974f0..f5d3565ee 100644 --- a/tapgarden/custodian.go +++ b/tapgarden/custodian.go @@ -400,13 +400,6 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { continue } - // TODO(ffranr): This proof courier disabled check should be - // removed. It was implemented because some integration test do - // not setup and use a proof courier. - if c.cfg.ProofCourierCfg == nil { - continue - } - // Now that we've seen this output on chain, we'll launch a // goroutine to use the ProofCourier to import the proof into // our local DB. From 36c2a0bb66a761279fa2617ac58fee6dba51aa6c Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 3 Jan 2024 13:57:01 +0100 Subject: [PATCH 33/54] multi: create unique cfg for each proof courier With this commit we disentangle the configurations for the two proof couriers, so we can configure them individually in a proper way. --- itest/tapd_harness.go | 31 +++++++++++++++++++++---------- proof/courier.go | 22 +++++++++++----------- tapcfg/config.go | 14 ++++++++++++-- 3 files changed, 44 insertions(+), 23 deletions(-) diff --git a/itest/tapd_harness.go b/itest/tapd_harness.go index 1c500875a..25220115b 100644 --- a/itest/tapd_harness.go +++ b/itest/tapd_harness.go @@ -47,9 +47,19 @@ var ( "allow the postgres fixture to run in total. Needs "+ "to be increased for long-running tests.") - // defaultBackoffConfig is the default backoff config we'll use for - // sending proofs. - defaultBackoffConfig = proof.BackoffCfg{ + // defaultHashmailBackoffConfig is the default backoff config we'll use + // for sending proofs with the hashmail courier. + defaultHashmailBackoffConfig = proof.BackoffCfg{ + BackoffResetWait: time.Second, + NumTries: 5, + InitialBackoff: 300 * time.Millisecond, + MaxBackoff: 600 * time.Millisecond, + } + + // defaultUniverseRpcBackoffConfig is the default backoff config we'll + // use for sending proofs with the universe RPC courier. + defaultUniverseRpcBackoffConfig = proof.BackoffCfg{ + SkipInitDelay: true, BackoffResetWait: time.Second, NumTries: 5, InitialBackoff: 300 * time.Millisecond, @@ -200,9 +210,11 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, // Populate proof courier specific config fields. // // Use passed in backoff config or default config. 
- backoffCfg := defaultBackoffConfig + hashmailBackoffCfg := defaultHashmailBackoffConfig + universeRpcBackoffCfg := defaultUniverseRpcBackoffConfig if opts.proofSendBackoffCfg != nil { - backoffCfg = *opts.proofSendBackoffCfg + hashmailBackoffCfg = *opts.proofSendBackoffCfg + universeRpcBackoffCfg = *opts.proofSendBackoffCfg } // Used passed in proof receiver ack timeout or default. @@ -211,12 +223,12 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, receiverAckTimeout = *opts.proofReceiverAckTimeout } - // TODO(ffranr): Disentangle the hashmail config from the universe RPC - // courier config. Right now, the universe courier takes the backoff - // config from the hashmail courier config. finalCfg.HashMailCourier = &proof.HashMailCourierCfg{ ReceiverAckTimeout: receiverAckTimeout, - BackoffCfg: &backoffCfg, + BackoffCfg: &hashmailBackoffCfg, + } + finalCfg.UniverseRpcCourier = &proof.UniverseRpcCourierCfg{ + BackoffCfg: &universeRpcBackoffCfg, } switch typedProofCourier := (opts.proofCourier).(type) { @@ -234,7 +246,6 @@ func newTapdHarness(t *testing.T, ht *harnessTest, cfg tapdConfig, default: finalCfg.DefaultProofCourierAddr = "" - finalCfg.HashMailCourier = nil } ht.t.Logf("Using proof courier address: %v", diff --git a/proof/courier.go b/proof/courier.go index f161b8190..6f4fcaa3e 100644 --- a/proof/courier.go +++ b/proof/courier.go @@ -185,13 +185,6 @@ func (h *UniverseRpcCourierAddr) Url() *url.URL { func (h *UniverseRpcCourierAddr) NewCourier(_ context.Context, cfg *CourierCfg, recipient Recipient) (Courier, error) { - // Skip the initial delivery delay for the universe RPC courier. - // This courier skips the initial delay because it uses the backoff - // procedure for each proof within a proof file separately. - // Consequently, if we attempt to perform two consecutive send events - // which share the same proof lineage (matching ancestral proofs), the - // second send event will be delayed by the initial delay. - cfg.BackoffCfg.SkipInitDelay = true backoffHandle := NewBackoffHandler(cfg.BackoffCfg, cfg.TransferLog) // Ensure that the courier address is a universe RPC address. @@ -557,10 +550,10 @@ func (e *BackoffExecError) Error() string { // BackoffCfg configures the behaviour of the proof delivery backoff procedure. type BackoffCfg struct { - // SkipInitDelay is a flag that indicates whether we should skip - // the initial delay before attempting to deliver the proof to the - // receiver. - SkipInitDelay bool + // SkipInitDelay is a flag that indicates whether we should skip the + // initial delay before attempting to deliver the proof to the receiver + // or receiving from the sender. + SkipInitDelay bool `long:"skipinitdelay" description:"Skip the initial delay before attempting to deliver the proof to the receiver or receiving from the sender."` // BackoffResetWait is the amount of time we'll wait before // resetting the backoff counter to its initial state. @@ -1049,6 +1042,13 @@ func (h *HashMailCourier) SetSubscribers( // proof.Courier interface. var _ Courier = (*HashMailCourier)(nil) +// UniverseRpcCourierCfg is the config for the universe RPC proof courier. +type UniverseRpcCourierCfg struct { + // BackoffCfg configures the behaviour of the proof delivery + // functionality. + BackoffCfg *BackoffCfg +} + // UniverseRpcCourier is a universe RPC proof courier service handle. It // implements the Courier interface. 
 type UniverseRpcCourier struct {
diff --git a/tapcfg/config.go b/tapcfg/config.go
index d5f6c5f5c..ebccde50e 100644
--- a/tapcfg/config.go
+++ b/tapcfg/config.go
@@ -303,8 +303,9 @@ type Config struct {
 	ReOrgSafeDepth int32 `long:"reorgsafedepth" description:"The number of confirmations we'll wait for before considering a transaction safely buried in the chain."`
 
 	// The following options are used to configure the proof courier.
-	DefaultProofCourierAddr string                    `long:"proofcourieraddr" description:"Default proof courier service address."`
-	HashMailCourier         *proof.HashMailCourierCfg `group:"proofcourier" namespace:"hashmailcourier"`
+	DefaultProofCourierAddr string                       `long:"proofcourieraddr" description:"Default proof courier service address."`
+	HashMailCourier         *proof.HashMailCourierCfg    `group:"hashmailcourier" namespace:"hashmailcourier"`
+	UniverseRpcCourier      *proof.UniverseRpcCourierCfg `group:"universerpccourier" namespace:"universerpccourier"`
 
 	CustodianProofRetrievalDelay time.Duration `long:"custodianproofretrievaldelay" description:"The number of seconds the custodian waits after identifying an asset transfer on-chain and before retrieving the corresponding proof."`
 
@@ -391,6 +392,15 @@ func DefaultConfig() Config {
 				MaxBackoff:       defaultProofTransferMaxBackoff,
 			},
 		},
+		UniverseRpcCourier: &proof.UniverseRpcCourierCfg{
+			BackoffCfg: &proof.BackoffCfg{
+				SkipInitDelay:    true,
+				BackoffResetWait: defaultProofTransferBackoffResetWait,
+				NumTries:         defaultProofTransferNumTries,
+				InitialBackoff:   defaultProofTransferInitialBackoff,
+				MaxBackoff:       defaultProofTransferMaxBackoff,
+			},
+		},
 		CustodianProofRetrievalDelay: defaultProofRetrievalDelay,
 		Universe: &UniverseConfig{
 			SyncInterval: defaultUniverseSyncInterval,

From 664ec838ef75a6ebf2cc68dc3e3decbb84145dac Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 11 Dec 2023 15:18:52 +0100
Subject: [PATCH 34/54] proof: add MockProofCourier

---
 proof/mock.go | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/proof/mock.go b/proof/mock.go
index 55c5d1ea6..05191a04d 100644
--- a/proof/mock.go
+++ b/proof/mock.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"encoding/hex"
 	"io"
+	"sync"
 	"testing"
 	"time"
 
@@ -13,6 +14,7 @@ import (
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightninglabs/taproot-assets/asset"
 	"github.com/lightninglabs/taproot-assets/commitment"
+	"github.com/lightninglabs/taproot-assets/fn"
 	"github.com/lightninglabs/taproot-assets/internal/test"
 	"github.com/stretchr/testify/require"
 )
@@ -74,6 +76,80 @@ func MockGroupAnchorVerifier(gen *asset.Genesis,
 	return nil
 }
 
+// MockProofCourier is a mock proof courier that stores the proofs it
+// receives in memory, keyed by the recipient script key.
+type MockProofCourier struct {
+	sync.Mutex
+
+	currentProofs map[asset.SerializedKey]*AnnotatedProof
+
+	subscribers map[uint64]*fn.EventReceiver[fn.Event]
+}
+
+// NewMockProofCourier returns a new mock proof courier.
+func NewMockProofCourier() *MockProofCourier {
+	return &MockProofCourier{
+		currentProofs: make(map[asset.SerializedKey]*AnnotatedProof),
+	}
+}
+
+// Start starts the proof courier service.
+func (m *MockProofCourier) Start(chan error) error {
+	return nil
+}
+
+// Stop stops the proof courier service.
+func (m *MockProofCourier) Stop() error {
+	return nil
+}
+
+// DeliverProof attempts to deliver a proof to the receiver, using the
+// information in the Addr type.
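+// Proofs are stored per recipient script key, so delivering a second proof
+// for the same script key simply overwrites the earlier one.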
+func (m *MockProofCourier) DeliverProof(_ context.Context,
+	proof *AnnotatedProof) error {
+
+	m.Lock()
+	defer m.Unlock()
+
+	m.currentProofs[asset.ToSerialized(&proof.ScriptKey)] = proof
+
+	return nil
+}
+
+// ReceiveProof attempts to obtain a proof as identified by the passed
+// locator from the source encapsulated within the specified address.
+func (m *MockProofCourier) ReceiveProof(_ context.Context,
+	loc Locator) (*AnnotatedProof, error) {
+
+	m.Lock()
+	defer m.Unlock()
+
+	proof, ok := m.currentProofs[asset.ToSerialized(&loc.ScriptKey)]
+	if !ok {
+		return nil, ErrProofNotFound
+	}
+
+	return proof, nil
+}
+
+// SetSubscribers sets the set of subscribers that will be notified
+// of proof courier related events.
+func (m *MockProofCourier) SetSubscribers(
+	subscribers map[uint64]*fn.EventReceiver[fn.Event]) {
+
+	m.Lock()
+	defer m.Unlock()
+
+	m.subscribers = subscribers
+}
+
+// Close stops the courier instance.
+func (m *MockProofCourier) Close() error {
+	return nil
+}
+
+var _ Courier = (*MockProofCourier)(nil)
+
 type ValidTestCase struct {
 	Proof    *TestProof `json:"proof"`
 	Expected string     `json:"expected"`

From 76f33c47e322aabe67a7a5f8b77fcdcd7f9cbc42 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 11 Dec 2023 15:18:53 +0100
Subject: [PATCH 35/54] proof+tapcfg: add CourierDispatch interface

Instead of needing to create a special proof courier address and then
creating a courier from that, we want to have a central dispatcher that
can hold on to the configuration for each of the different couriers. A
courier is then created through the dispatcher directly.
---
 proof/courier.go | 133 ++++++++++++++++++++++++++++++++++++++++-------
 tapcfg/server.go |   6 +--
 2 files changed, 117 insertions(+), 22 deletions(-)

diff --git a/proof/courier.go b/proof/courier.go
index 6f4fcaa3e..c91d836a3 100644
--- a/proof/courier.go
+++ b/proof/courier.go
@@ -72,6 +72,113 @@ type Courier interface {
 	Close() error
 }
 
+// CourierCfg contains general config parameters applicable to all proof
+// couriers.
+type CourierCfg struct {
+	// HashMailCfg contains hashmail protocol specific config parameters.
+	HashMailCfg *HashMailCourierCfg
+
+	// UniverseRpcCfg contains universe RPC protocol specific config
+	// parameters.
+	UniverseRpcCfg *UniverseRpcCourierCfg
+
+	// TransferLog is a log for recording proof delivery and retrieval
+	// attempts.
+	TransferLog TransferLog
+}
+
+// CourierDispatch is an interface that abstracts away the different proof
+// courier services that are supported.
+type CourierDispatch interface {
+	// NewCourier instantiates a new courier service handle given a service
+	// URL address.
+	NewCourier(addr *url.URL, recipient Recipient) (Courier, error)
+}
+
+// URLDispatch is a proof courier dispatch that uses the courier address URL
+// scheme to determine which courier service to use.
+type URLDispatch struct {
+	cfg *CourierCfg
+}
+
+// NewCourierDispatch creates a new proof courier dispatch.
+func NewCourierDispatch(cfg *CourierCfg) *URLDispatch {
+	return &URLDispatch{
+		cfg: cfg,
+	}
+}
+
+// NewCourier instantiates a new courier service handle given a service URL
+// address.
+func (u *URLDispatch) NewCourier(addr *url.URL,
+	recipient Recipient) (Courier, error) {
+
+	subscribers := make(map[uint64]*fn.EventReceiver[fn.Event])
+
+	// Create a new courier based on the URL scheme.
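+	// Both branches below follow the same shape: construct a backoff
+	// handler from the protocol-specific config, then wrap the transport
+	// (hashmail mailbox or universe gRPC client) in the matching Courier
+	// implementation.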
+ switch addr.Scheme { + case HashmailCourierType: + cfg := u.cfg.HashMailCfg + backoffHandler := NewBackoffHandler( + cfg.BackoffCfg, u.cfg.TransferLog, + ) + + hashMailCfg := HashMailCourierCfg{ + ReceiverAckTimeout: cfg.ReceiverAckTimeout, + } + + hashMailBox, err := NewHashMailBox(addr) + if err != nil { + return nil, fmt.Errorf("unable to make mailbox: %v", + err) + } + + return &HashMailCourier{ + cfg: &hashMailCfg, + backoffHandle: backoffHandler, + recipient: recipient, + mailbox: hashMailBox, + subscribers: subscribers, + }, nil + + case UniverseRpcCourierType: + cfg := u.cfg.UniverseRpcCfg + backoffHandler := NewBackoffHandler( + cfg.BackoffCfg, u.cfg.TransferLog, + ) + + // Connect to the universe RPC server. + dialOpts, err := serverDialOpts() + if err != nil { + return nil, err + } + + serverAddr := fmt.Sprintf("%s:%s", addr.Hostname(), addr.Port()) + conn, err := grpc.Dial(serverAddr, dialOpts...) + if err != nil { + return nil, err + } + + client := unirpc.NewUniverseClient(conn) + + return &UniverseRpcCourier{ + recipient: recipient, + client: client, + backoffHandle: backoffHandler, + transfer: u.cfg.TransferLog, + subscribers: subscribers, + }, nil + + default: + return nil, fmt.Errorf("unknown courier address protocol "+ + "(consider updating tapd): %v", addr.Scheme) + } +} + +// A compile-time assertion to ensure that the URLDispatch meets the +// CourierDispatch interface. +var _ CourierDispatch = (*URLDispatch)(nil) + // CourierAddr is a fully validated courier address (including protocol specific // validation). type CourierAddr interface { @@ -126,10 +233,12 @@ func (h *HashMailCourierAddr) Url() *url.URL { func (h *HashMailCourierAddr) NewCourier(_ context.Context, cfg *CourierCfg, recipient Recipient) (Courier, error) { - backoffHandle := NewBackoffHandler(cfg.BackoffCfg, cfg.TransferLog) + backoffHandle := NewBackoffHandler( + cfg.HashMailCfg.BackoffCfg, cfg.TransferLog, + ) hashMailCfg := HashMailCourierCfg{ - ReceiverAckTimeout: cfg.ReceiverAckTimeout, + ReceiverAckTimeout: cfg.HashMailCfg.ReceiverAckTimeout, } hashMailBox, err := NewHashMailBox(&h.addr) @@ -185,7 +294,9 @@ func (h *UniverseRpcCourierAddr) Url() *url.URL { func (h *UniverseRpcCourierAddr) NewCourier(_ context.Context, cfg *CourierCfg, recipient Recipient) (Courier, error) { - backoffHandle := NewBackoffHandler(cfg.BackoffCfg, cfg.TransferLog) + backoffHandle := NewBackoffHandler( + cfg.UniverseRpcCfg.BackoffCfg, cfg.TransferLog, + ) // Ensure that the courier address is a universe RPC address. if h.addr.Scheme != UniverseRpcCourierType { @@ -251,22 +362,6 @@ func NewCourier(ctx context.Context, addr url.URL, cfg *CourierCfg, return courierAddr.NewCourier(ctx, cfg, recipient) } -// CourierCfg contains general config parameters applicable to all proof -// couriers. -type CourierCfg struct { - // ReceiverAckTimeout is the maximum time we'll wait for the receiver to - // acknowledge the proof. - ReceiverAckTimeout time.Duration - - // BackoffCfg configures the behaviour of the proof delivery - // functionality. - BackoffCfg *BackoffCfg - - // TransferLog is a log for recording proof delivery and retrieval - // attempts. - TransferLog TransferLog -} - // ProofMailbox represents an abstract store-and-forward mailbox that can be // used to send/receive proofs. 
type ProofMailbox interface { diff --git a/tapcfg/server.go b/tapcfg/server.go index 7e6aca378..2f478051f 100644 --- a/tapcfg/server.go +++ b/tapcfg/server.go @@ -322,9 +322,9 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, // types of couriers that currently exist will receive this config upon // initialization. proofCourierCfg := &proof.CourierCfg{ - ReceiverAckTimeout: cfg.HashMailCourier.ReceiverAckTimeout, - BackoffCfg: cfg.HashMailCourier.BackoffCfg, - TransferLog: assetStore, + HashMailCfg: cfg.HashMailCourier, + UniverseRpcCfg: cfg.UniverseRpcCourier, + TransferLog: assetStore, } return &tap.Config{ From db9386671d4036bcbcf45b0ab09e911bed4416ee Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Mon, 11 Dec 2023 15:18:54 +0100 Subject: [PATCH 36/54] multi: use proof courier dispatcher This commit switches the custodian and porter over to using the new proof courier dispatcher. This then allows us to delete a bunch of now unused code related to proof courier addresses. --- proof/courier.go | 184 ++++--------------------------- proof/mock.go | 15 +++ rpcserver.go | 15 +-- tapcfg/server.go | 37 ++++--- tapfreighter/chain_porter.go | 17 +-- tapfreighter/parcel.go | 2 +- tapgarden/custodian.go | 14 +-- tapgarden/custodian_test.go | 204 +++++++++++++++++++++++++++-------- 8 files changed, 235 insertions(+), 253 deletions(-) diff --git a/proof/courier.go b/proof/courier.go index c91d836a3..d250a098b 100644 --- a/proof/courier.go +++ b/proof/courier.go @@ -167,6 +167,7 @@ func (u *URLDispatch) NewCourier(addr *url.URL, backoffHandle: backoffHandler, transfer: u.cfg.TransferLog, subscribers: subscribers, + rawConn: conn, }, nil default: @@ -179,187 +180,40 @@ func (u *URLDispatch) NewCourier(addr *url.URL, // CourierDispatch interface. var _ CourierDispatch = (*URLDispatch)(nil) -// CourierAddr is a fully validated courier address (including protocol specific -// validation). -type CourierAddr interface { - // Url returns the url.URL representation of the courier address. - Url() *url.URL - - // NewCourier generates a new courier service handle. - NewCourier(ctx context.Context, cfg *CourierCfg, - recipient Recipient) (Courier, error) -} - -// ParseCourierAddrString parses a proof courier address string and returns a -// protocol specific courier address instance. -func ParseCourierAddrString(addr string) (CourierAddr, error) { - // Parse URI. +// ParseCourierAddress attempts to parse the given string as a proof courier +// address, validates that all required fields are present and ensures the +// protocol is one of the supported protocols. +func ParseCourierAddress(addr string) (*url.URL, error) { urlAddr, err := url.ParseRequestURI(addr) if err != nil { return nil, fmt.Errorf("invalid proof courier URI address: %w", err) } - return ParseCourierAddrUrl(*urlAddr) -} - -// ParseCourierAddrUrl parses a proof courier address url.URL and returns a -// protocol specific courier address instance. -func ParseCourierAddrUrl(addr url.URL) (CourierAddr, error) { - // Create new courier addr based on URL scheme. - switch addr.Scheme { - case HashmailCourierType: - return NewHashMailCourierAddr(addr) - case UniverseRpcCourierType: - return NewUniverseRpcCourierAddr(addr) - } - - return nil, fmt.Errorf("unknown courier address protocol "+ - "(consider updating tapd): %v", addr.Scheme) -} - -// HashMailCourierAddr is a hashmail protocol specific implementation of the -// CourierAddr interface. 
-type HashMailCourierAddr struct { - addr url.URL -} - -// Url returns the url.URL representation of the hashmail courier address. -func (h *HashMailCourierAddr) Url() *url.URL { - return &h.addr -} - -// NewCourier generates a new courier service handle. -func (h *HashMailCourierAddr) NewCourier(_ context.Context, cfg *CourierCfg, - recipient Recipient) (Courier, error) { - - backoffHandle := NewBackoffHandler( - cfg.HashMailCfg.BackoffCfg, cfg.TransferLog, - ) - - hashMailCfg := HashMailCourierCfg{ - ReceiverAckTimeout: cfg.HashMailCfg.ReceiverAckTimeout, - } - - hashMailBox, err := NewHashMailBox(&h.addr) - if err != nil { - return nil, fmt.Errorf("unable to make mailbox: %v", - err) - } - - subscribers := make( - map[uint64]*fn.EventReceiver[fn.Event], - ) - return &HashMailCourier{ - cfg: &hashMailCfg, - backoffHandle: backoffHandle, - recipient: recipient, - mailbox: hashMailBox, - subscribers: subscribers, - }, nil -} - -// NewHashMailCourierAddr generates a new hashmail courier address from a given -// URL. This function also performs hashmail protocol specific address -// validation. -func NewHashMailCourierAddr(addr url.URL) (*HashMailCourierAddr, error) { - if addr.Scheme != HashmailCourierType { - return nil, fmt.Errorf("expected hashmail courier protocol: %v", - addr.Scheme) - } - - // We expect the port number to be specified for a hashmail service. - if addr.Port() == "" { - return nil, fmt.Errorf("hashmail proof courier URI address " + - "port unspecified") - } - - return &HashMailCourierAddr{ - addr, - }, nil -} - -// UniverseRpcCourierAddr is a universe RPC protocol specific implementation of -// the CourierAddr interface. -type UniverseRpcCourierAddr struct { - addr url.URL -} - -// Url returns the url.URL representation of the courier address. -func (h *UniverseRpcCourierAddr) Url() *url.URL { - return &h.addr -} - -// NewCourier generates a new courier service handle. -func (h *UniverseRpcCourierAddr) NewCourier(_ context.Context, - cfg *CourierCfg, recipient Recipient) (Courier, error) { - - backoffHandle := NewBackoffHandler( - cfg.UniverseRpcCfg.BackoffCfg, cfg.TransferLog, - ) - - // Ensure that the courier address is a universe RPC address. - if h.addr.Scheme != UniverseRpcCourierType { - return nil, fmt.Errorf("unsupported courier protocol: %v", - h.addr.Scheme) - } - - // Connect to the universe RPC server. - dialOpts, err := serverDialOpts() - if err != nil { + if err := ValidateCourierAddress(urlAddr); err != nil { return nil, err } - serverAddr := fmt.Sprintf( - "%s:%s", h.addr.Hostname(), h.addr.Port(), - ) - conn, err := grpc.Dial(serverAddr, dialOpts...) - if err != nil { - return nil, err - } - - client := unirpc.NewUniverseClient(conn) - - // Instantiate the events subscribers map. - subscribers := make( - map[uint64]*fn.EventReceiver[fn.Event], - ) - - return &UniverseRpcCourier{ - recipient: recipient, - client: client, - backoffHandle: backoffHandle, - transfer: cfg.TransferLog, - subscribers: subscribers, - rawConn: conn, - }, nil + return urlAddr, nil } -// NewUniverseRpcCourierAddr generates a new universe RPC courier address from a -// given URL. This function also performs protocol specific address validation. -func NewUniverseRpcCourierAddr(addr url.URL) (*UniverseRpcCourierAddr, error) { +// ValidateCourierAddress validates that all required fields are present and +// ensures the protocol is one of the supported protocols. +func ValidateCourierAddress(addr *url.URL) error { // We expect the port number to be specified. 
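+	// Both supported couriers dial a host:port pair directly, so an
+	// address without an explicit port cannot be used.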
if addr.Port() == "" { - return nil, fmt.Errorf("proof courier URI address port " + - "unspecified") + return fmt.Errorf("proof courier URI address port unspecified") } - return &UniverseRpcCourierAddr{ - addr, - }, nil -} - -// NewCourier instantiates a new courier service handle given a service URL -// address. -func NewCourier(ctx context.Context, addr url.URL, cfg *CourierCfg, - recipient Recipient) (Courier, error) { + switch addr.Scheme { + case HashmailCourierType, UniverseRpcCourierType: + // Valid and known courier address protocol. + return nil - courierAddr, err := ParseCourierAddrUrl(addr) - if err != nil { - return nil, err + default: + return fmt.Errorf("unknown courier address protocol "+ + "(consider updating tapd): %v", addr.Scheme) } - - return courierAddr.NewCourier(ctx, cfg, recipient) } // ProofMailbox represents an abstract store-and-forward mailbox that can be @@ -564,7 +418,7 @@ func (h *HashMailBox) RecvAck(ctx context.Context, sid streamID) error { return fmt.Errorf("expected ack, got %x", msg.Msg) } -// CleanUp atempts to tear down the mailbox as specified by the passed sid. +// CleanUp attempts to tear down the mailbox as specified by the passed sid. func (h *HashMailBox) CleanUp(ctx context.Context, sid streamID) error { streamAuth := &hashmailrpc.CipherBoxAuth{ Desc: &hashmailrpc.CipherBoxDesc{ diff --git a/proof/mock.go b/proof/mock.go index 05191a04d..c4810e8be 100644 --- a/proof/mock.go +++ b/proof/mock.go @@ -5,6 +5,7 @@ import ( "context" "encoding/hex" "io" + "net/url" "sync" "testing" "time" @@ -76,6 +77,20 @@ func MockGroupAnchorVerifier(gen *asset.Genesis, return nil } +// MockProofCourierDispatcher is a mock proof courier dispatcher which returns +// the same courier for all requests. +type MockProofCourierDispatcher struct { + Courier Courier +} + +// NewCourier instantiates a new courier service handle given a service +// URL address. +func (m *MockProofCourierDispatcher) NewCourier(*url.URL, Recipient) (Courier, + error) { + + return m.Courier, nil +} + // MockProofCourier is a mock proof courier which stores the last proof it // received. type MockProofCourier struct { diff --git a/rpcserver.go b/rpcserver.go index 992bd79d4..ac2809820 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -1056,19 +1056,14 @@ func (r *rpcServer) NewAddr(ctx context.Context, // the default specified in the config. courierAddr := r.cfg.DefaultProofCourierAddr if req.ProofCourierAddr != "" { - addr, err := proof.ParseCourierAddrString( + var err error + courierAddr, err = proof.ParseCourierAddress( req.ProofCourierAddr, ) if err != nil { return nil, fmt.Errorf("invalid proof courier "+ "address: %w", err) } - - // At this point, we do not intend on creating a proof courier - // service instance. We are only interested in parsing and - // validating the address. We therefore convert the address into - // an url.URL type for storage in the address book. - courierAddr = addr.Url() } // Check that the proof courier address is set. This should never @@ -1077,7 +1072,6 @@ func (r *rpcServer) NewAddr(ctx context.Context, if courierAddr == nil { return nil, fmt.Errorf("no proof courier address provided") } - proofCourierAddr := *courierAddr if len(req.AssetId) != 32 { return nil, fmt.Errorf("invalid asset id length") @@ -1114,8 +1108,7 @@ func (r *rpcServer) NewAddr(ctx context.Context, // Now that we have all the params, we'll try to add a new // address to the addr book. 
 			addr, err = r.cfg.AddrBook.NewAddress(
-				ctx, assetID, req.Amt, tapscriptSibling,
-				proofCourierAddr,
+				ctx, assetID, req.Amt, tapscriptSibling, *courierAddr,
 				address.WithAssetVersion(assetVersion),
 			)
 			if err != nil {
@@ -1156,7 +1149,7 @@ func (r *rpcServer) NewAddr(ctx context.Context,
 		// address to the addr book.
 		addr, err = r.cfg.AddrBook.NewAddressWithKeys(
 			ctx, assetID, req.Amt, *scriptKey, internalKey,
-			tapscriptSibling, proofCourierAddr,
+			tapscriptSibling, *courierAddr,
 			address.WithAssetVersion(assetVersion),
 		)
 		if err != nil {
diff --git a/tapcfg/server.go b/tapcfg/server.go
index 2f478051f..1020d2e24 100644
--- a/tapcfg/server.go
+++ b/tapcfg/server.go
@@ -212,7 +212,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 	fallbackHashmailCourierAddr := fmt.Sprintf(
 		"%s://%s", proof.HashmailCourierType, fallbackHashMailAddr,
 	)
-	proofCourierAddr, err := proof.ParseCourierAddrString(
+	proofCourierAddr, err := proof.ParseCourierAddress(
 		fallbackHashmailCourierAddr,
 	)
 	if err != nil {
@@ -222,7 +222,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 
 	// If default proof courier address is set, use it as the default.
 	if cfg.DefaultProofCourierAddr != "" {
-		proofCourierAddr, err = proof.ParseCourierAddrString(
+		proofCourierAddr, err = proof.ParseCourierAddress(
 			cfg.DefaultProofCourierAddr,
 		)
 		if err != nil {
@@ -321,11 +321,11 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 	// Addresses can have different proof couriers configured, but both
 	// types of couriers that currently exist will receive this config upon
 	// initialization.
-	proofCourierCfg := &proof.CourierCfg{
+	proofCourierDispatcher := proof.NewCourierDispatch(&proof.CourierCfg{
 		HashMailCfg:    cfg.HashMailCourier,
 		UniverseRpcCfg: cfg.UniverseRpcCourier,
 		TransferLog:    assetStore,
-	}
+	})
 
 	return &tap.Config{
 		DebugLevel: cfg.DebugLevel,
@@ -359,19 +359,19 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 				GroupVerifier: tapgarden.GenGroupVerifier(
 					context.Background(), assetMintingStore,
 				),
-				AddrBook:            addrBook,
-				ProofArchive:        proofArchive,
-				ProofNotifier:       assetStore,
-				ErrChan:             mainErrChan,
-				ProofCourierCfg:     proofCourierCfg,
-				ProofRetrievalDelay: cfg.CustodianProofRetrievalDelay,
-				ProofWatcher:        reOrgWatcher,
+				AddrBook:               addrBook,
+				ProofArchive:           proofArchive,
+				ProofNotifier:          assetStore,
+				ErrChan:                mainErrChan,
+				ProofCourierDispatcher: proofCourierDispatcher,
+				ProofRetrievalDelay:    cfg.CustodianProofRetrievalDelay,
+				ProofWatcher:           reOrgWatcher,
 			},
 		),
 		ChainBridge:             chainBridge,
 		AddrBook:                addrBook,
 		AddrBookDisableSyncer:   cfg.AddrBook.DisableSyncer,
-		DefaultProofCourierAddr: proofCourierAddr.Url(),
+		DefaultProofCourierAddr: proofCourierAddr,
 		ProofArchive:            proofArchive,
 		AssetWallet:             assetWallet,
 		CoinSelect:              coinSelect,
@@ -384,13 +384,13 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger,
 			GroupVerifier: tapgarden.GenGroupVerifier(
 				context.Background(), assetMintingStore,
 			),
-			Wallet:          walletAnchor,
-			KeyRing:         keyRing,
-			AssetWallet:     assetWallet,
-			AssetProofs:     proofFileStore,
-			ProofCourierCfg: proofCourierCfg,
-			ProofWatcher:    reOrgWatcher,
-			ErrChan:         mainErrChan,
+			Wallet:                 walletAnchor,
+			KeyRing:                keyRing,
+			AssetWallet:            assetWallet,
+			AssetProofs:            proofFileStore,
+			ProofCourierDispatcher: proofCourierDispatcher,
+			ProofWatcher:           reOrgWatcher,
+			ErrChan:                mainErrChan,
 		},
 	),
 	UniverseArchive: baseUni,
diff --git a/tapfreighter/chain_porter.go b/tapfreighter/chain_porter.go
index 8295567b0..d345cf2c1 100644
--- a/tapfreighter/chain_porter.go
+++ b/tapfreighter/chain_porter.go
@@ -60,9 +60,10 @@ type ChainPorterConfig struct { // TODO(roasbeef): replace with proof.Courier in the future/ AssetProofs proof.Archiver - // ProofCourierCfg is a general config applicable to all proof courier - // service handles. - ProofCourierCfg *proof.CourierCfg + // ProofCourierDispatcher is the dispatcher that is used to create new + // proof courier handles for sending proofs based on the protocol of + // a proof courier address. + ProofCourierDispatcher proof.CourierDispatch // ProofWatcher is used to watch new proofs for their anchor transaction // to be confirmed safely with a minimum number of confirmations. @@ -648,7 +649,7 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { log.Debugf("Attempting to deliver proof for script key %x", key.SerializeCompressed()) - proofCourierAddr, err := proof.ParseCourierAddrString( + proofCourierAddr, err := proof.ParseCourierAddress( string(out.ProofCourierAddr), ) if err != nil { @@ -663,8 +664,8 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { AssetID: *receiverProof.AssetID, Amount: out.Amount, } - courier, err := proofCourierAddr.NewCourier( - ctx, p.cfg.ProofCourierCfg, recipient, + courier, err := p.cfg.ProofCourierDispatcher.NewCourier( + proofCourierAddr, recipient, ) if err != nil { return fmt.Errorf("unable to initiate proof courier "+ @@ -700,7 +701,7 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { return nil } - // If we have a proof courier instance active, then we'll launch several + // If we have a non-interactive proof, then we'll launch several // goroutines to deliver the proof(s) to the receiver(s). Since a // pre-signed parcel (a parcel that uses the RPC driven vPSBT flow) // doesn't have proof courier URLs (they aren't part of the vPSBT), the @@ -708,7 +709,7 @@ func (p *ChainPorter) transferReceiverProof(pkg *sendPackage) error { // to receiver, and we don't even need to attempt to use a proof // courier. _, isPreSigned := pkg.Parcel.(*PreSignedParcel) - if p.cfg.ProofCourierCfg != nil && !isPreSigned { + if !isPreSigned { ctx, cancel := p.WithCtxQuitNoTimeout() defer cancel() diff --git a/tapfreighter/parcel.go b/tapfreighter/parcel.go index fe6018730..53066864b 100644 --- a/tapfreighter/parcel.go +++ b/tapfreighter/parcel.go @@ -183,7 +183,7 @@ func (p *AddressParcel) Validate() error { tapAddr := p.destAddrs[idx] // Validate proof courier addresses. - _, err := proof.ParseCourierAddrUrl(tapAddr.ProofCourierAddr) + err := proof.ValidateCourierAddress(&tapAddr.ProofCourierAddr) if err != nil { return fmt.Errorf("invalid proof courier address: %w", err) diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go index f5d3565ee..a981c9f76 100644 --- a/tapgarden/custodian.go +++ b/tapgarden/custodian.go @@ -71,7 +71,7 @@ type CustodianConfig struct { // ProofArchive is the storage backend for proofs to which we store new // incoming proofs. - ProofArchive proof.NotifyArchiver + ProofArchive proof.Archiver // ProofNotifier is the storage backend for proofs from which we are // notified about new proofs. This can be the same as the ProofArchive @@ -80,9 +80,10 @@ type CustodianConfig struct { // being available in the relational database). ProofNotifier proof.NotifyArchiver - // ProofCourierCfg is a general config applicable to all proof courier - // service handles. 
- ProofCourierCfg *proof.CourierCfg + // ProofCourierDispatcher is the dispatcher that is used to create new + // proof courier handles for receiving proofs based on the protocol of + // a proof courier address. + ProofCourierDispatcher proof.CourierDispatch // ProofRetrievalDelay is the time duration the custodian waits having // identified an asset transfer on-chain and before retrieving the @@ -422,9 +423,8 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { AssetID: assetID, Amount: addr.Amount, } - courier, err := proof.NewCourier( - ctx, addr.ProofCourierAddr, - c.cfg.ProofCourierCfg, recipient, + courier, err := c.cfg.ProofCourierDispatcher.NewCourier( + &addr.ProofCourierAddr, recipient, ) if err != nil { log.Errorf("unable to initiate proof courier "+ diff --git a/tapgarden/custodian_test.go b/tapgarden/custodian_test.go index 21f9276c9..28ca2a61c 100644 --- a/tapgarden/custodian_test.go +++ b/tapgarden/custodian_test.go @@ -1,9 +1,12 @@ package tapgarden_test import ( + "bytes" "context" "database/sql" + "fmt" "math/rand" + "net/url" "testing" "time" @@ -13,15 +16,16 @@ import ( "github.com/lightninglabs/lndclient" "github.com/lightninglabs/taproot-assets/address" "github.com/lightninglabs/taproot-assets/asset" + "github.com/lightninglabs/taproot-assets/commitment" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/internal/test" "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tapdb" - "github.com/lightninglabs/taproot-assets/tapdb/sqlc" "github.com/lightninglabs/taproot-assets/tapgarden" "github.com/lightninglabs/taproot-assets/tapscript" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -34,11 +38,9 @@ var ( ) // newAddrBook creates a new instance of the TapAddressBook book. -func newAddrBook(t *testing.T, keyRing *tapgarden.MockKeyRing, +func newAddrBookForDB(db *tapdb.BaseDB, keyRing *tapgarden.MockKeyRing, syncer *tapgarden.MockAssetSyncer) (*address.Book, - *tapdb.TapAddressBook, sqlc.Querier) { - - db := tapdb.NewTestDB(t) + *tapdb.TapAddressBook) { txCreator := func(tx *sql.Tx) tapdb.AddrBook { return db.WithTx(tx) @@ -54,12 +56,12 @@ func newAddrBook(t *testing.T, keyRing *tapgarden.MockKeyRing, Chain: *chainParams, KeyRing: keyRing, }) - return book, tapdbBook, db + return book, tapdbBook } // newProofArchive creates a new instance of the MultiArchiver. -func newProofArchive(t *testing.T) (*proof.MultiArchiver, *tapdb.AssetStore) { - db := tapdb.NewTestDB(t) +func newProofArchiveForDB(t *testing.T, db *tapdb.BaseDB) (*proof.MultiArchiver, + *tapdb.AssetStore) { txCreator := func(tx *sql.Tx) tapdb.ActiveAssetsStore { return db.WithTx(tx) @@ -90,7 +92,7 @@ type custodianHarness struct { addrBook *address.Book syncer *tapgarden.MockAssetSyncer assetDB *tapdb.AssetStore - proofArchive *proof.MultiArchiver + courier *proof.MockProofCourier } // assertStartup makes sure the custodian was started correctly. @@ -114,6 +116,48 @@ func (h *custodianHarness) eventually(fn func() bool) { require.Eventually(h.t, fn, testTimeout, testPollInterval) } +// assertEventsPresent makes sure that the given number of events is present in +// the address book, then returns those events. 
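+// The helper polls until the events show up, since the custodian registers
+// them asynchronously after noticing the transaction.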
+func (h *custodianHarness) assertEventsPresent(numEvents int,
+	status address.Status) []*address.Event {
+
+	ctx := context.Background()
+	ctxt, cancel := context.WithTimeout(ctx, testTimeout)
+	defer cancel()
+
+	// Wait for the expected number of events to be registered with the
+	// expected status, then hand them back to the caller.
+	var finalEvents []*address.Event
+	err := wait.NoError(func() error {
+		events, err := h.tapdbBook.QueryAddrEvents(
+			ctxt, address.EventQueryParams{},
+		)
+		if err != nil {
+			return err
+		}
+
+		if len(events) != numEvents {
+			return fmt.Errorf("wanted %d events but got %d",
+				numEvents, len(events))
+		}
+
+		for idx, event := range events {
+			if event.Status != status {
+				return fmt.Errorf("event %d has status %v "+
+					"but wanted %v", idx, event.Status,
+					status)
+			}
+		}
+
+		finalEvents = events
+
+		return nil
+	}, testTimeout)
+	require.NoError(h.t, err)
+
+	return finalEvents
+}
+
 // assertAddrsRegistered makes sure that for each of the given addresses a
 // pubkey was imported into the wallet.
 func (h *custodianHarness) assertAddrsRegistered(
@@ -138,8 +182,14 @@ func newHarness(t *testing.T,
 	walletAnchor := tapgarden.NewMockWalletAnchor()
 	keyRing := tapgarden.NewMockKeyRing()
 	syncer := tapgarden.NewMockAssetSyncer()
-	addrBook, tapdbBook, _ := newAddrBook(t, keyRing, syncer)
-	proofArchive, assetDB := newProofArchive(t)
+	db := tapdb.NewTestDB(t)
+	addrBook, tapdbBook := newAddrBookForDB(db.BaseDB, keyRing, syncer)
+	_, assetDB := newProofArchiveForDB(t, db.BaseDB)
+	courier := proof.NewMockProofCourier()
+	courierDispatch := &proof.MockProofCourierDispatcher{
+		Courier: courier,
+	}
+	proofWatcher := &tapgarden.MockProofWatcher{}
 
 	ctxb := context.Background()
 	for _, initialAddr := range initialAddrs {
@@ -148,13 +198,15 @@ func newHarness(t *testing.T,
 	}
 
 	cfg := &tapgarden.CustodianConfig{
-		ChainParams:   chainParams,
-		ChainBridge:   chainBridge,
-		WalletAnchor:  walletAnchor,
-		AddrBook:      addrBook,
-		ProofArchive:  proofArchive,
-		ProofNotifier: assetDB,
-		ErrChan:       make(chan error, 1),
+		ChainParams:            chainParams,
+		ChainBridge:            chainBridge,
+		WalletAnchor:           walletAnchor,
+		AddrBook:               addrBook,
+		ProofArchive:           assetDB,
+		ProofNotifier:          assetDB,
+		ProofCourierDispatcher: courierDispatch,
+		ProofWatcher:           proofWatcher,
+		ErrChan:                make(chan error, 1),
 	}
 	return &custodianHarness{
 		t:            t,
@@ -167,20 +219,21 @@ func newHarness(t *testing.T,
 		addrBook:     addrBook,
 		syncer:       syncer,
 		assetDB:      assetDB,
-		proofArchive: proofArchive,
+		courier:      courier,
 	}
 }
 
-func randAddr(h *custodianHarness) *address.AddrWithKeyInfo {
-	proofCourierAddr := address.RandProofCourierAddr(h.t)
+func randAddr(h *custodianHarness) (*address.AddrWithKeyInfo, *asset.Genesis) {
 	addr, genesis, group := address.RandAddr(
-		h.t, &address.RegressionNetTap, proofCourierAddr,
+		h.t, &address.RegressionNetTap, url.URL{
+			Scheme: "mock",
+		},
 	)
 
 	err := h.tapdbBook.InsertAssetGen(context.Background(), genesis, group)
 	require.NoError(h.t, err)
 
-	return addr
+	return addr, genesis
 }
 
 func randWalletTx(addr *address.AddrWithKeyInfo) (int, *lndclient.Transaction) {
@@ -193,9 +246,12 @@ func randWalletTx(addr *address.AddrWithKeyInfo) (int, *lndclient.Transaction) {
 	taprootOutput := rand.Intn(numOutputs)
 
 	for idx := 0; idx < numInputs; idx++ {
-		in := &wire.TxIn{}
-		_, _ = rand.Read(in.PreviousOutPoint.Hash[:])
-		in.PreviousOutPoint.Index = rand.Uint32()
+		in := &wire.TxIn{
+			PreviousOutPoint: wire.OutPoint{
+				Hash:  test.RandHash(),
+				Index: rand.Uint32(),
+			},
+		}
 		tx.Tx.AddTxIn(in)
 		tx.PreviousOutpoints = append(
 			tx.PreviousOutpoints,
&lnrpc.PreviousOutPoint{ @@ -231,6 +287,71 @@ func randWalletTx(addr *address.AddrWithKeyInfo) (int, *lndclient.Transaction) { return taprootOutput, tx } +func randProof(t *testing.T, outputIndex int, tx *wire.MsgTx, + genesis *asset.Genesis, + addr *address.AddrWithKeyInfo) *proof.AnnotatedProof { + + a := asset.Asset{ + Version: asset.V0, + Genesis: *genesis, + Amount: addr.Amount, + ScriptKey: asset.NewScriptKey(&addr.ScriptKey), + } + if addr.GroupKey != nil { + a.GroupKey = &asset.GroupKey{ + GroupPubKey: *addr.GroupKey, + } + } + + p := &proof.Proof{ + PrevOut: wire.OutPoint{}, + BlockHeader: wire.BlockHeader{ + Timestamp: time.Unix(rand.Int63(), 0), + }, + AnchorTx: *tx, + TxMerkleProof: proof.TxMerkleProof{}, + Asset: a, + InclusionProof: proof.TaprootProof{ + InternalKey: test.RandPubKey(t), + OutputIndex: uint32(outputIndex), + }, + } + + f, err := proof.NewFile(proof.V0, *p) + require.NoError(t, err) + + var buf bytes.Buffer + require.NoError(t, f.Encode(&buf)) + + ac, err := commitment.NewAssetCommitment(&a) + require.NoError(t, err) + tc, err := commitment.NewTapCommitment(ac) + require.NoError(t, err) + + op := wire.OutPoint{ + Hash: tx.TxHash(), + Index: uint32(outputIndex), + } + + return &proof.AnnotatedProof{ + Locator: proof.Locator{ + AssetID: fn.Ptr(genesis.ID()), + GroupKey: addr.GroupKey, + ScriptKey: addr.ScriptKey, + OutPoint: &op, + }, + Blob: buf.Bytes(), + AssetSnapshot: &proof.AssetSnapshot{ + Asset: &a, + OutPoint: op, + AnchorTx: tx, + OutputIndex: uint32(outputIndex), + InternalKey: test.RandPubKey(t), + ScriptRoot: tc, + }, + } +} + // insertAssetInfo starts a background goroutine that receives asset info that // was fetched from the asset syncer, and stores it in the address book. This // simulates asset bootstrapping that would occur during universe sync. @@ -279,7 +400,7 @@ func TestCustodianNewAddr(t *testing.T) { <-h.keyRing.ReqKeys }() ctx := context.Background() - addr := randAddr(h) + addr, _ := randAddr(h) proofCourierAddr := address.RandProofCourierAddr(t) dbAddr, err := h.addrBook.NewAddress( ctx, addr.AssetID, addr.Amount, nil, proofCourierAddr, @@ -373,6 +494,8 @@ func TestBookAssetSyncer(t *testing.T) { close(quitAssetWatcher) } +// TestTransactionHandling tests that the custodian correctly handles incoming +// transactions. func TestTransactionHandling(t *testing.T) { h := newHarness(t, nil) @@ -382,15 +505,21 @@ func TestTransactionHandling(t *testing.T) { const numAddrs = 5 addrs := make([]*address.AddrWithKeyInfo, numAddrs) + genesis := make([]*asset.Genesis, numAddrs) for i := 0; i < numAddrs; i++ { - addrs[i] = randAddr(h) + addrs[i], genesis[i] = randAddr(h) err := h.tapdbBook.InsertAddrs(ctx, *addrs[i]) require.NoError(t, err) } outputIdx, tx := randWalletTx(addrs[0]) + tx.Confirmations = 1 h.walletAnchor.Transactions = append(h.walletAnchor.Transactions, *tx) + mockProof := randProof(t, outputIdx, tx.Tx, genesis[0], addrs[0]) + err := h.courier.DeliverProof(nil, mockProof) + require.NoError(t, err) + require.NoError(t, h.c.Start()) t.Cleanup(func() { require.NoError(t, h.c.Stop()) @@ -402,21 +531,12 @@ func TestTransactionHandling(t *testing.T) { // Only one event should be registered though, as we've only created one // transaction. 
- h.eventually(func() bool { - events, err := h.tapdbBook.QueryAddrEvents( - ctx, address.EventQueryParams{}, - ) - require.NoError(t, err) - - if len(events) != 1 { - t.Logf("Got %d events", len(events)) - return false - } + events := h.assertEventsPresent(1, address.StatusCompleted) + require.EqualValues(t, outputIdx, events[0].Outpoint.Index) - require.EqualValues(t, outputIdx, events[0].Outpoint.Index) - - return true - }) + dbProof, err := h.assetDB.FetchProof(ctx, mockProof.Locator) + require.NoError(t, err) + require.EqualValues(t, mockProof.Blob, dbProof) } func mustMakeAddr(t *testing.T, From 7ec9edddf5d85ec08699502c5681420ade897190 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Mon, 11 Dec 2023 15:24:16 +0100 Subject: [PATCH 37/54] tapgarden: extract receiving proof into method --- tapgarden/custodian.go | 161 ++++++++++++++++++++--------------------- 1 file changed, 79 insertions(+), 82 deletions(-) diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go index a981c9f76..8b5eb16ff 100644 --- a/tapgarden/custodian.go +++ b/tapgarden/custodian.go @@ -405,98 +405,95 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { // goroutine to use the ProofCourier to import the proof into // our local DB. c.Wg.Add(1) - go func() { - defer c.Wg.Done() + go c.receiveProof(addr, op) + } - ctx, cancel := c.WithCtxQuitNoTimeout() - defer cancel() + return nil +} - assetID := addr.AssetID +// receiveProof attempts to receive a proof for the given address and outpoint +// via the proof courier service. +// +// NOTE: This must be called as a goroutine. +func (c *Custodian) receiveProof(addr *address.Tap, op wire.OutPoint) { + defer c.Wg.Done() - log.Debugf("Waiting to receive proof for script key %x", - addr.ScriptKey.SerializeCompressed()) + ctx, cancel := c.WithCtxQuitNoTimeout() + defer cancel() - // Initiate proof courier service handle from the proof - // courier address found in the Tap address. - recipient := proof.Recipient{ - ScriptKey: &addr.ScriptKey, - AssetID: assetID, - Amount: addr.Amount, - } - courier, err := c.cfg.ProofCourierDispatcher.NewCourier( - &addr.ProofCourierAddr, recipient, - ) - if err != nil { - log.Errorf("unable to initiate proof courier "+ - "service handle: %v", err) - return - } + assetID := addr.AssetID - // Update courier handle events subscribers before - // attempting to retrieve proof. - c.statusEventsSubsMtx.Lock() - courier.SetSubscribers(c.statusEventsSubs) - c.statusEventsSubsMtx.Unlock() - - // Sleep to give the sender an opportunity to transfer - // the proof to the proof courier service. - // Without this delay our first attempt at retrieving - // the proof will very likely fail. We should expect - // retrieval success before this delay. - select { - case <-time.After(c.cfg.ProofRetrievalDelay): - case <-ctx.Done(): - return - } + scriptKeyBytes := addr.ScriptKey.SerializeCompressed() + log.Debugf("Waiting to receive proof for script key %x", scriptKeyBytes) - // Attempt to receive proof via proof courier service. - loc := proof.Locator{ - AssetID: &assetID, - GroupKey: addr.GroupKey, - ScriptKey: addr.ScriptKey, - OutPoint: &op, - } - addrProof, err := courier.ReceiveProof(ctx, loc) - if err != nil { - log.Errorf("unable to recv proof: %v", err) - return - } + // Initiate proof courier service handle from the proof courier address + // found in the Tap address. 
+ recipient := proof.Recipient{ + ScriptKey: &addr.ScriptKey, + AssetID: assetID, + Amount: addr.Amount, + } + courier, err := c.cfg.ProofCourierDispatcher.NewCourier( + &addr.ProofCourierAddr, recipient, + ) + if err != nil { + log.Errorf("unable to initiate proof courier service handle: "+ + "%v", err) + return + } - log.Debugf("Received proof for: script_key=%x, "+ - "asset_id=%x", - addr.ScriptKey.SerializeCompressed(), - assetID[:]) - - ctx, cancel = c.CtxBlocking() - defer cancel() - - headerVerifier := GenHeaderVerifier( - ctx, c.cfg.ChainBridge, - ) - err = c.cfg.ProofArchive.ImportProofs( - ctx, headerVerifier, c.cfg.GroupVerifier, false, - addrProof, - ) - if err != nil { - log.Errorf("unable to import proofs: %v", err) - return - } + // Update courier handle events subscribers before attempting to + // retrieve proof. + c.statusEventsSubsMtx.Lock() + courier.SetSubscribers(c.statusEventsSubs) + c.statusEventsSubsMtx.Unlock() + + // Sleep to give the sender an opportunity to transfer the proof to the + // proof courier service. Without this delay our first attempt at + // retrieving the proof will very likely fail. We should expect + // retrieval success before this delay. + select { + case <-time.After(c.cfg.ProofRetrievalDelay): + case <-ctx.Done(): + return + } - // At this point the "receive" process is complete. We - // will now notify all status event subscribers. - recvCompleteEvent := NewAssetRecvCompleteEvent( - *addr, op, - ) - err = c.publishSubscriberStatusEvent(recvCompleteEvent) - if err != nil { - log.Errorf("unable publish status event: %v", - err) - return - } - }() + // Attempt to receive proof via proof courier service. + loc := proof.Locator{ + AssetID: &assetID, + GroupKey: addr.GroupKey, + ScriptKey: addr.ScriptKey, + OutPoint: &op, + } + addrProof, err := courier.ReceiveProof(ctx, loc) + if err != nil { + log.Errorf("Unable to receive proof using courier: %v", err) + return } - return nil + log.Debugf("Received proof for: script_key=%x, asset_id=%x", + scriptKeyBytes, assetID[:]) + + ctx, cancel = c.CtxBlocking() + defer cancel() + + headerVerifier := GenHeaderVerifier(ctx, c.cfg.ChainBridge) + err = c.cfg.ProofArchive.ImportProofs( + ctx, headerVerifier, c.cfg.GroupVerifier, false, addrProof, + ) + if err != nil { + log.Errorf("Unable to import proofs: %v", err) + return + } + + // At this point the "receive" process is complete. We will now notify + // all status event subscribers. + receiveCompleteEvent := NewAssetRecvCompleteEvent(*addr, op) + err = c.publishSubscriberStatusEvent(receiveCompleteEvent) + if err != nil { + log.Errorf("Unable publish status event: %v", err) + return + } } // mapToTapAddr attempts to match a transaction output to a Taproot Asset From 35f380c2b1907dedcac48139a9830c43f84dbbc3 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Mon, 11 Dec 2023 15:36:38 +0100 Subject: [PATCH 38/54] tapgarden: re-try proof courier on restart We didn't re-try using a proof courier to receive an asset after a restart of the daemon. This commit fixes #597. --- tapgarden/custodian.go | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go index 8b5eb16ff..150403026 100644 --- a/tapgarden/custodian.go +++ b/tapgarden/custodian.go @@ -271,11 +271,24 @@ func (c *Custodian) watchInboundAssets() { // Maybe a proof was delivered while we were shutting down or // starting up, let's check now. 
-		err = c.checkProofAvailable(event)
+		available, err := c.checkProofAvailable(event)
 		if err != nil {
 			reportErr(err)
 			return
 		}
+
+		// If we did find a proof, we have already imported it, so we
+		// can remove the event from our cache.
+		if available {
+			delete(c.events, event.Outpoint)
+
+			continue
+		}
+
+		// If we didn't find a proof, we'll launch a goroutine to use
+		// the ProofCourier to import the proof into our local DB.
+		c.Wg.Add(1)
+		go c.receiveProof(event.Addr.Tap, event.Outpoint)
 	}
 
 	// Read all on-chain transactions and make sure they are mapped to an
@@ -594,7 +607,7 @@ func (c *Custodian) importAddrToWallet(addr *address.AddrWithKeyInfo) error {
 
 // checkProofAvailable checks the proof storage if a proof for the given event
 // is already available. If it is, and it checks out, the event is updated.
-func (c *Custodian) checkProofAvailable(event *address.Event) error {
+func (c *Custodian) checkProofAvailable(event *address.Event) (bool, error) {
 	ctxt, cancel := c.WithCtxQuit()
 	defer cancel()
 
@@ -610,34 +623,36 @@ func (c *Custodian) checkProofAvailable(event *address.Event) error {
 	})
 	switch {
 	case errors.Is(err, proof.ErrProofNotFound):
-		return nil
+		return false, nil
 
 	case err != nil:
-		return fmt.Errorf("error fetching proof for event: %w", err)
+		return false, fmt.Errorf("error fetching proof for event: %w",
+			err)
 	}
 
 	file := proof.NewEmptyFile(proof.V0)
 	if err := file.Decode(bytes.NewReader(blob)); err != nil {
-		return fmt.Errorf("error decoding proof file: %w", err)
+		return false, fmt.Errorf("error decoding proof file: %w", err)
 	}
 
 	// Exit early on empty proof (shouldn't happen outside of test cases).
 	if file.IsEmpty() {
-		return fmt.Errorf("archive contained empty proof file: %w", err)
+		return false, fmt.Errorf("archive contained empty proof file: "+
+			"%w", err)
 	}
 
 	lastProof, err := file.LastProof()
 	if err != nil {
-		return fmt.Errorf("error fetching last proof: %w", err)
+		return false, fmt.Errorf("error fetching last proof: %w", err)
 	}
 
 	// The proof might be an old state, let's make sure it matches our event
 	// before marking the inbound asset transfer as complete.
	if AddrMatchesAsset(event.Addr, &lastProof.Asset) {
-		return c.setReceiveCompleted(event, lastProof, file)
+		return true, c.setReceiveCompleted(event, lastProof, file)
 	}
 
-	return nil
+	return false, nil
 }
 
 // mapProofToEvent inspects a new proof and attempts to match it to an existing
From 7c6d37b6b8f40d763151985532fd06e73fe0ff6e Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Mon, 11 Dec 2023 15:39:07 +0100
Subject: [PATCH 39/54] tapgarden: always notify subscribers on receive complete

This commit moves the subscriber notification for a completed transfer
out of the proof-courier-specific code path and into the general
function that is reached whenever a transfer completes.
---
 tapgarden/custodian.go | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go
index 150403026..d92cc8be7 100644
--- a/tapgarden/custodian.go
+++ b/tapgarden/custodian.go
@@ -498,15 +498,6 @@ func (c *Custodian) receiveProof(addr *address.Tap, op wire.OutPoint) {
 		log.Errorf("Unable to import proofs: %v", err)
 		return
 	}
-
-	// At this point the "receive" process is complete. We will now notify
-	// all status event subscribers.
- receiveCompleteEvent := NewAssetRecvCompleteEvent(*addr, op) - err = c.publishSubscriberStatusEvent(receiveCompleteEvent) - if err != nil { - log.Errorf("Unable publish status event: %v", err) - return - } } // mapToTapAddr attempts to match a transaction output to a Taproot Asset @@ -707,13 +698,23 @@ func (c *Custodian) mapProofToEvent(p proof.Blob) error { func (c *Custodian) setReceiveCompleted(event *address.Event, lastProof *proof.Proof, proofFile *proof.File) error { + // At this point the "receive" process is complete. We will now notify + // all status event subscribers. + receiveCompleteEvent := NewAssetRecvCompleteEvent( + *event.Addr.Tap, event.Outpoint, + ) + err := c.publishSubscriberStatusEvent(receiveCompleteEvent) + if err != nil { + log.Errorf("Unable publish status event: %v", err) + } + // The proof is created after a single confirmation. To make sure we // notice if the anchor transaction is re-organized out of the chain, we // give all the not-yet-sufficiently-buried proofs in the received proof // file to the re-org watcher and replace the updated proof in the local // proof archive if a re-org happens. The sender will do the same, so no // re-send of the proof is necessary. - err := c.cfg.ProofWatcher.MaybeWatch( + err = c.cfg.ProofWatcher.MaybeWatch( proofFile, c.cfg.ProofWatcher.DefaultUpdateCallback(), ) if err != nil { From 99c240b92b837c8883aab6d641edce03bd39d466 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Mon, 11 Dec 2023 19:20:07 +0100 Subject: [PATCH 40/54] tapgarden: start courier only after confirmation This commit makes sure we only start the proof courier once the on-chain transaction has confirmed. Otherwise we'll run into backoffs for sure until we get the first confirmation. --- tapgarden/custodian.go | 29 ++++++++++---- tapgarden/custodian_test.go | 80 +++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 7 deletions(-) diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go index d92cc8be7..8d5b6b352 100644 --- a/tapgarden/custodian.go +++ b/tapgarden/custodian.go @@ -396,13 +396,21 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { } c.events[op] = event + + // Now that we've seen this output confirm on + // chain, we'll launch a goroutine to use the + // ProofCourier to import the proof into our + // local DB. + c.Wg.Add(1) + go c.receiveProof(event.Addr.Tap, op) } continue } // This is a new output, let's find out if it's for an address - // of ours. + // of ours. This step also creates a new event for the address + // if it doesn't exist yet. addr, err := c.mapToTapAddr(walletTx, uint32(idx), op) if err != nil { return err @@ -414,9 +422,17 @@ func (c *Custodian) inspectWalletTx(walletTx *lndclient.Transaction) error { continue } - // Now that we've seen this output on chain, we'll launch a - // goroutine to use the ProofCourier to import the proof into - // our local DB. + // We now need to wait for a confirmation, since proofs will + // be delivered once the anchor transaction is confirmed. If + // we skip it now, we'll receive another notification once the + // transaction is confirmed. + if walletTx.Confirmations == 0 { + continue + } + + // Now that we've seen this output confirm on chain, we'll + // launch a goroutine to use the ProofCourier to import the + // proof into our local DB. 
c.Wg.Add(1) go c.receiveProof(addr, op) } @@ -450,7 +466,7 @@ func (c *Custodian) receiveProof(addr *address.Tap, op wire.OutPoint) { &addr.ProofCourierAddr, recipient, ) if err != nil { - log.Errorf("unable to initiate proof courier service handle: "+ + log.Errorf("Unable to initiate proof courier service handle: "+ "%v", err) return } @@ -589,8 +605,7 @@ func (c *Custodian) importAddrToWallet(addr *address.AddrWithKeyInfo) error { log.Infof("Imported Taproot Asset address %v into wallet", addrStr) if p2trAddr != nil { - log.Infof("watching p2tr address %v on chain", - p2trAddr.String()) + log.Infof("Watching p2tr address %v on chain", p2trAddr) } return c.cfg.AddrBook.SetAddrManaged(ctxt, addr, time.Now()) diff --git a/tapgarden/custodian_test.go b/tapgarden/custodian_test.go index 28ca2a61c..9f2fcf860 100644 --- a/tapgarden/custodian_test.go +++ b/tapgarden/custodian_test.go @@ -539,6 +539,86 @@ func TestTransactionHandling(t *testing.T) { require.EqualValues(t, mockProof.Blob, dbProof) } +// TestTransactionConfirmedOnly tests that the custodian only starts the proof +// courier once a transaction has been confirmed. We also test that it correctly +// re-tries fetching proofs using a proof courier after it has been restarted. +func TestTransactionConfirmedOnly(t *testing.T) { + t.Parallel() + + runTransactionConfirmedOnlyTest(t, false) + runTransactionConfirmedOnlyTest(t, true) +} + +// runTransactionConfirmedOnlyTest runs the transaction confirmed only test, +// optionally restarting the custodian in the middle. +func runTransactionConfirmedOnlyTest(t *testing.T, withRestart bool) { + h := newHarness(t, nil) + + // Before we start the custodian, we create a few random addresses. + ctx := context.Background() + + const numAddrs = 5 + addrs := make([]*address.AddrWithKeyInfo, numAddrs) + genesis := make([]*asset.Genesis, numAddrs) + for i := 0; i < numAddrs; i++ { + addrs[i], genesis[i] = randAddr(h) + err := h.tapdbBook.InsertAddrs(ctx, *addrs[i]) + require.NoError(t, err) + } + + // We start the custodian and make sure it's started up correctly. This + // should add pending events for each of the addresses. + require.NoError(t, h.c.Start()) + t.Cleanup(func() { + require.NoError(t, h.c.Stop()) + }) + h.assertStartup() + + // We expect all addresses to be watched by the wallet now. + h.assertAddrsRegistered(addrs...) + + // To make sure the custodian adds address events for each address, we + // need to signal an unconfirmed transaction for each of them now. + outputIndexes := make([]int, numAddrs) + transactions := make([]*lndclient.Transaction, numAddrs) + for idx := range addrs { + outputIndex, tx := randWalletTx(addrs[idx]) + outputIndexes[idx] = outputIndex + transactions[idx] = tx + h.walletAnchor.SubscribeTx <- *tx + + // We also simulate that the proof courier has all the proofs + // it needs. + mockProof := randProof( + t, outputIndexes[idx], tx.Tx, genesis[idx], addrs[idx], + ) + _ = h.courier.DeliverProof(nil, mockProof) + } + + // We want events to be created for each address, they should be in the + // state where they detected a transaction. + h.assertEventsPresent(numAddrs, address.StatusTransactionDetected) + + // In case we're testing with a restart, we now restart the custodian. + if withRestart { + require.NoError(t, h.c.Stop()) + + h.c = tapgarden.NewCustodian(h.cfg) + require.NoError(t, h.c.Start()) + h.assertStartup() + } + + // Now we confirm the transactions. This should trigger the custodian to + // fetch the proof for each of the addresses. 
+ for idx := range transactions { + tx := transactions[idx] + tx.Confirmations = 1 + h.walletAnchor.SubscribeTx <- *tx + } + + h.assertEventsPresent(numAddrs, address.StatusCompleted) +} + func mustMakeAddr(t *testing.T, gen asset.Genesis, groupKey *btcec.PublicKey, groupWitness wire.TxWitness, scriptKey btcec.PublicKey) *address.Tap { From fbe8814d52c872343ceb94b3083bca7d61806ee6 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Mon, 11 Dec 2023 19:46:24 +0100 Subject: [PATCH 41/54] itest: save about 40 seconds in itest By reducing the hashmail proof receiver ACK timeout from 15 seconds to 5 seconds, we can save quite some time during the integration tests. --- itest/send_test.go | 10 +++++++--- itest/tapd_harness.go | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/itest/send_test.go b/itest/send_test.go index c7ab48f26..ca709666f 100644 --- a/itest/send_test.go +++ b/itest/send_test.go @@ -21,6 +21,10 @@ import ( "github.com/stretchr/testify/require" ) +var ( + transferTypeSend = taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND +) + // testBasicSendUnidirectional tests that we can properly send assets back and // forth between nodes. func testBasicSendUnidirectional(t *harnessTest) { @@ -619,7 +623,7 @@ func testReattemptFailedSendHashmailCourier(t *harnessTest) { switch eventTyped := event.Event.(type) { case *taprpc.SendAssetEvent_ProofTransferBackoffWaitEvent: ev := eventTyped.ProofTransferBackoffWaitEvent - if ev.TransferType != taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND { + if ev.TransferType != transferTypeSend { return false } @@ -726,7 +730,7 @@ func testReattemptFailedSendUniCourier(t *harnessTest) { switch eventTyped := event.Event.(type) { case *taprpc.SendAssetEvent_ProofTransferBackoffWaitEvent: ev := eventTyped.ProofTransferBackoffWaitEvent - if ev.TransferType != taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND { + if ev.TransferType != transferTypeSend { return false } @@ -1000,7 +1004,7 @@ func testOfflineReceiverEventuallyReceives(t *harnessTest) { // node. We therefore expect to receive // deliver transfer type backoff wait events // for sending transfers. - if ev.TransferType != taprpc.ProofTransferType_PROOF_TRANSFER_TYPE_SEND { + if ev.TransferType != transferTypeSend { return false } diff --git a/itest/tapd_harness.go b/itest/tapd_harness.go index 25220115b..05664927b 100644 --- a/itest/tapd_harness.go +++ b/itest/tapd_harness.go @@ -76,7 +76,7 @@ const ( // defaultProofTransferReceiverAckTimeout is the default itest specific // timeout we'll use for waiting for a receiver to acknowledge a proof // transfer. - defaultProofTransferReceiverAckTimeout = 15 * time.Second + defaultProofTransferReceiverAckTimeout = 5 * time.Second ) // tapdHarness is a test harness that holds everything that is needed to From 70f7176b4ce8a03dac41ddf38dba52f546e29c2d Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 26 Jan 2024 14:30:03 -0800 Subject: [PATCH 42/54] tapdb: add support for logging migrations In this commit, we add support for logging migrations as they happen. This is useful for users to keep track of what happens during a `tapd` update. 
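For readers unfamiliar with the library's logging hook: golang-migrate
invokes an optional logger through its small migrate.Logger interface,
which only requires Printf and Verbose. The sketch below is
illustrative only and not part of this patch; the stdoutLogger type and
attachLogger helper are hypothetical names, while the migrate.Logger
interface and the exported Log field are the library's documented API.

	package migrationsexample

	import (
		"fmt"

		"github.com/golang-migrate/migrate/v4"
	)

	// stdoutLogger is a minimal migrate.Logger implementation that
	// simply mirrors migration progress to stdout.
	type stdoutLogger struct{}

	// Printf is called by golang-migrate once per migration step.
	func (s *stdoutLogger) Printf(format string, v ...interface{}) {
		fmt.Printf("migration: "+format, v...)
	}

	// Verbose tells the library whether per-step detail is wanted.
	func (s *stdoutLogger) Verbose() bool {
		return true
	}

	// attachLogger wires the logger into a migration instance; setting
	// the exported Log field is all that is needed for the library to
	// start reporting applied migrations.
	func attachLogger(m *migrate.Migrate) {
		m.Log = &stdoutLogger{}
	}

	// Compile-time check that the interface is satisfied.
	var _ migrate.Logger = (*stdoutLogger)(nil)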
--- tapdb/migrations.go | 47 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/tapdb/migrations.go b/tapdb/migrations.go index 117feac90..4c288380f 100644 --- a/tapdb/migrations.go +++ b/tapdb/migrations.go @@ -2,17 +2,52 @@ package tapdb import ( "bytes" + "errors" "io" "io/fs" "net/http" "strings" + "github.com/btcsuite/btclog" "github.com/golang-migrate/migrate/v4" "github.com/golang-migrate/migrate/v4/database" "github.com/golang-migrate/migrate/v4/source/httpfs" ) -// applyMigrations executes all database migration files found in the given file +// migrationLogger is a logger that wraps the passed btclog.Logger so it can be +// used to log migrations. +type migrationLogger struct { + log btclog.Logger +} + +// Printf is like fmt.Printf. We map this to the target logger based on the +// current log level. +func (m *migrationLogger) Printf(format string, v ...interface{}) { + // Trim trailing newlines from the format. + format = strings.TrimRight(format, "\n") + + switch m.log.Level() { + case btclog.LevelTrace: + m.log.Tracef(format, v...) + case btclog.LevelDebug: + m.log.Debugf(format, v...) + case btclog.LevelInfo: + m.log.Infof(format, v...) + case btclog.LevelWarn: + m.log.Warnf(format, v...) + case btclog.LevelError: + m.log.Errorf(format, v...) + case btclog.LevelCritical: + m.log.Criticalf(format, v...) + } +} + +// Verbose should return true when verbose logging output is wanted +func (m *migrationLogger) Verbose() bool { + return m.log.Level() <= btclog.LevelDebug +} + +// applyMigrations executes database migration files found in the given file // system under the given path, using the passed database driver and database // name. func applyMigrations(fs fs.FS, driver database.Driver, path, @@ -36,8 +71,16 @@ func applyMigrations(fs fs.FS, driver database.Driver, path, if err != nil { return err } + + migrationVersion, _, _ := sqlMigrate.Version() + + log.Infof("Applying migrations from version=%v", migrationVersion) + + // Apply our local logger to the migration instance. + sqlMigrate.Log = &migrationLogger{log} + err = sqlMigrate.Up() - if err != nil && err != migrate.ErrNoChange { + if err != nil && !errors.Is(err, migrate.ErrNoChange) { return err } From b71ccb7b2f3a0122e901d9e4dec16b5a230dab25 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:22:53 +0100 Subject: [PATCH 43/54] proof+tapgarden: shrink NotifyArchiver interface With this commit we change the NotifyArchiver to the bare minimum that we actually need in the custodian. --- proof/archive.go | 7 ++++++- tapgarden/re-org_watcher.go | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/proof/archive.go b/proof/archive.go index 9dd38f472..4bb0e4eb0 100644 --- a/proof/archive.go +++ b/proof/archive.go @@ -125,7 +125,12 @@ type Archiver interface { // NotifyArchiver is an Archiver that also allows callers to subscribe to // notifications about new proofs being added to the archiver. type NotifyArchiver interface { - Archiver + // FetchProof fetches a proof for an asset uniquely identified by the + // passed Identifier. The returned blob is expected to be the encoded + // full proof file, containing the complete provenance of the asset. + // + // If a proof cannot be found, then ErrProofNotFound should be returned. 
+ FetchProof(ctx context.Context, id Locator) (Blob, error) fn.EventPublisher[Blob, []*Locator] } diff --git a/tapgarden/re-org_watcher.go b/tapgarden/re-org_watcher.go index 4b650d60c..3bb20201e 100644 --- a/tapgarden/re-org_watcher.go +++ b/tapgarden/re-org_watcher.go @@ -68,7 +68,7 @@ type ReOrgWatcherConfig struct { // ProofArchive is the storage backend for proofs to which we store // updated proofs. - ProofArchive proof.NotifyArchiver + ProofArchive proof.Archiver // NonBuriedAssetFetcher is a function that returns all assets that are // not yet sufficiently deep buried. From dd117ec476476601ca1f49e70e58e8666a647692 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:22:54 +0100 Subject: [PATCH 44/54] proof: extract FetchProofProvenance function for iterative retrieval With this commit we extract the logic for iteratively retrieving the full provenance for an asset by starting at the last proof then querying each previous proof until we arrive at the genesis. We will want to re-use this logic outside of the proof courier itself, so it's useful to extract into a general-purpose function where the actual retrieval of an individual proof is done in a callback and can be adjusted to the current proof storage (local or remote). --- proof/courier.go | 162 ++++++++++++++++++++++++++++------------------- 1 file changed, 96 insertions(+), 66 deletions(-) diff --git a/proof/courier.go b/proof/courier.go index d250a098b..808a4930b 100644 --- a/proof/courier.go +++ b/proof/courier.go @@ -1137,36 +1137,24 @@ func (c *UniverseRpcCourier) DeliverProof(ctx context.Context, func (c *UniverseRpcCourier) ReceiveProof(ctx context.Context, originLocator Locator) (*AnnotatedProof, error) { - // In order to reconstruct the proof file we must collect all the - // transition proofs that make up the main chain of proofs. That is - // accomplished by iterating backwards through the main chain of proofs - // until we reach the genesis point (minting proof). - - // We will update the locator at each iteration. - loc := originLocator - - // revProofs is a slice of transition proofs ordered from latest to - // earliest (the issuance proof comes last in the slice). This ordering - // is a reversal of that found in the proof file. - var revProofs []Proof - - for { - assetID := *loc.AssetID - + fetchProof := func(ctx context.Context, loc Locator) (Blob, error) { var groupKeyBytes []byte if loc.GroupKey != nil { groupKeyBytes = loc.GroupKey.SerializeCompressed() } - universeID := unirpc.MarshalUniverseID( - assetID[:], groupKeyBytes, - ) - assetKey := unirpc.MarshalAssetKey( - *loc.OutPoint, &loc.ScriptKey, - ) + if loc.OutPoint == nil { + return nil, fmt.Errorf("proof locator for asset %x "+ + "is missing outpoint", loc.AssetID[:]) + } + universeKey := unirpc.UniverseKey{ - Id: universeID, - LeafKey: assetKey, + Id: unirpc.MarshalUniverseID( + loc.AssetID[:], groupKeyBytes, + ), + LeafKey: unirpc.MarshalAssetKey( + *loc.OutPoint, &loc.ScriptKey, + ), } // Setup proof receive/query routine and start backoff @@ -1197,50 +1185,13 @@ func (c *UniverseRpcCourier) ReceiveProof(ctx context.Context, "attempt has failed: %w", err) } - // Decode transition proof from query response. - var transitionProof Proof - if err := transitionProof.Decode( - bytes.NewReader(proofBlob), - ); err != nil { - return nil, err - } - - revProofs = append(revProofs, transitionProof) - - // Break if we've reached the genesis point (the asset is the - // genesis asset). 
- proofAsset := transitionProof.Asset - if proofAsset.IsGenesisAsset() { - break - } - - // Update locator with principal input to the current outpoint. - prevID, err := transitionProof.Asset.PrimaryPrevID() - if err != nil { - return nil, err - } - - // Parse script key public key. - scriptKeyPubKey, err := btcec.ParsePubKey(prevID.ScriptKey[:]) - if err != nil { - return nil, fmt.Errorf("failed to parse script key "+ - "public key from Proof.PrevID: %w", err) - } - loc.ScriptKey = *scriptKeyPubKey - - loc.AssetID = &prevID.ID - loc.OutPoint = &prevID.OutPoint + return proofBlob, nil } - // Append proofs to proof file in reverse order to their collected - // order. - proofFile := &File{} - for i := len(revProofs) - 1; i >= 0; i-- { - err := proofFile.AppendProof(revProofs[i]) - if err != nil { - return nil, fmt.Errorf("error appending proof to "+ - "proof file: %w", err) - } + proofFile, err := FetchProofProvenance(ctx, originLocator, fetchProof) + if err != nil { + return nil, fmt.Errorf("error fetching proof provenance: %w", + err) } // Encode the full proof file. @@ -1315,3 +1266,82 @@ type TransferLog interface { QueryProofTransferLog(context.Context, Locator, TransferType) ([]time.Time, error) } + +// FetchProofProvenance iterates backwards through the main chain of proofs +// until it reaches the genesis point (the asset is the genesis asset) and then +// returns the full proof file with the full provenance for the asset. +func FetchProofProvenance(ctx context.Context, originLocator Locator, + fetchSingleProof func(context.Context, Locator) (Blob, error)) (*File, + error) { + + // In order to reconstruct the proof file we must collect all the + // transition proofs that make up the main chain of proofs. That is + // accomplished by iterating backwards through the main chain of proofs + // until we reach the genesis point (minting proof). + + // We will update the locator at each iteration. + currentLocator := originLocator + + // reversedProofs is a slice of transition proofs ordered from latest to + // earliest (the issuance proof comes last in the slice). This ordering + // is a reversal of that found in the proof file. + var reversedProofs []Proof + for { + // Setup proof receive/query routine and start backoff + // procedure. + proofBlob, err := fetchSingleProof(ctx, currentLocator) + if err != nil { + return nil, fmt.Errorf("fetching single proof "+ + "failed: %w", err) + } + + // Decode transition proof from query response. + var currentProof Proof + err = currentProof.Decode(bytes.NewReader(proofBlob)) + if err != nil { + return nil, err + } + + reversedProofs = append(reversedProofs, currentProof) + + // Break if we've reached the genesis point (the asset is the + // genesis asset). + proofAsset := currentProof.Asset + if proofAsset.IsGenesisAsset() { + break + } + + // Update locator with principal input to the current outpoint. + prevID, err := currentProof.Asset.PrimaryPrevID() + if err != nil { + return nil, err + } + + // Parse script key public key. + scriptKeyPubKey, err := btcec.ParsePubKey(prevID.ScriptKey[:]) + if err != nil { + return nil, fmt.Errorf("failed to parse script key "+ + "public key from Proof.PrevID: %w", err) + } + + currentLocator = Locator{ + AssetID: &prevID.ID, + GroupKey: originLocator.GroupKey, + ScriptKey: *scriptKeyPubKey, + OutPoint: &prevID.OutPoint, + } + } + + // Append proofs to proof file in reverse order to their collected + // order. 
+ proofFile := &File{} + for i := len(reversedProofs) - 1; i >= 0; i-- { + err := proofFile.AppendProof(reversedProofs[i]) + if err != nil { + return nil, fmt.Errorf("error appending proof to "+ + "proof file: %w", err) + } + } + + return proofFile, nil +} From 53c3375e72e5b0d4dcf330df3e43de88a1fbcd59 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:22:55 +0100 Subject: [PATCH 45/54] proof: sparse decode proof in FetchProofProvenance This is an optimization that allows us to only extract the asset from the proof when fetching the full proof provenance. That saves us a whole decode/encode round trip per proof. --- proof/courier.go | 19 ++++++++++--------- proof/file.go | 20 ++++++++++++++++++++ 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/proof/courier.go b/proof/courier.go index 808a4930b..c3586751f 100644 --- a/proof/courier.go +++ b/proof/courier.go @@ -1285,7 +1285,7 @@ func FetchProofProvenance(ctx context.Context, originLocator Locator, // reversedProofs is a slice of transition proofs ordered from latest to // earliest (the issuance proof comes last in the slice). This ordering // is a reversal of that found in the proof file. - var reversedProofs []Proof + var reversedProofs []Blob for { // Setup proof receive/query routine and start backoff // procedure. @@ -1295,24 +1295,25 @@ func FetchProofProvenance(ctx context.Context, originLocator Locator, "failed: %w", err) } - // Decode transition proof from query response. - var currentProof Proof - err = currentProof.Decode(bytes.NewReader(proofBlob)) + // Decode just the asset leaf record from the proof. + var proofAsset asset.Asset + assetRecord := AssetLeafRecord(&proofAsset) + err = SparseDecode(bytes.NewReader(proofBlob), assetRecord) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to decode proof: %w", + err) } - reversedProofs = append(reversedProofs, currentProof) + reversedProofs = append(reversedProofs, proofBlob) // Break if we've reached the genesis point (the asset is the // genesis asset). - proofAsset := currentProof.Asset if proofAsset.IsGenesisAsset() { break } // Update locator with principal input to the current outpoint. - prevID, err := currentProof.Asset.PrimaryPrevID() + prevID, err := proofAsset.PrimaryPrevID() if err != nil { return nil, err } @@ -1336,7 +1337,7 @@ func FetchProofProvenance(ctx context.Context, originLocator Locator, // order. proofFile := &File{} for i := len(reversedProofs) - 1; i >= 0; i-- { - err := proofFile.AppendProof(reversedProofs[i]) + err := proofFile.AppendProofRaw(reversedProofs[i]) if err != nil { return nil, fmt.Errorf("error appending proof to "+ "proof file: %w", err) diff --git a/proof/file.go b/proof/file.go index 3e53e9316..e3c0998d3 100644 --- a/proof/file.go +++ b/proof/file.go @@ -393,6 +393,26 @@ func (f *File) AppendProof(proof Proof) error { return nil } +// AppendProofRaw appends a raw proof to the file and calculates its chained +// hash. +func (f *File) AppendProofRaw(proof []byte) error { + if f.IsUnknownVersion() { + return ErrUnknownVersion + } + + var prevHash [sha256.Size]byte + if !f.IsEmpty() { + prevHash = f.proofs[len(f.proofs)-1].hash + } + + f.proofs = append(f.proofs, &hashedProof{ + proofBytes: proof, + hash: hashProof(proof, prevHash), + }) + + return nil +} + // ReplaceLastProof attempts to replace the last proof in the file with another // one, updating its chained hash in the process. 
func (f *File) ReplaceLastProof(proof Proof) error { From d249d5e97d92bd75376a8da2903b6efbe6d923b0 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:22:56 +0100 Subject: [PATCH 46/54] multi: add HasProof to proof.Archive interface In some situations we want a quick way to find out if we have a proof (or not) without actually fetching it, so we add a HasProof method to the proof.Archive interface. The MultiArchiver will return false if one of the backends does not have the proof, so we can use that method to find out if we have a discrepancy between the backends and should import a proof file. --- proof/archive.go | 42 +++++++++++++++++++++++++++++++++++ tapdb/assets_store.go | 36 ++++++++++++++++++++++++++++++ tapdb/sqlc/assets.sql.go | 21 ++++++++++++++++++ tapdb/sqlc/querier.go | 1 + tapdb/sqlc/queries/assets.sql | 13 +++++++++++ tapdb/sqlutils_test.go | 14 ++++++++++++ tapgarden/mock.go | 6 +++++ 7 files changed, 133 insertions(+) diff --git a/proof/archive.go b/proof/archive.go index 4bb0e4eb0..ff02c5b24 100644 --- a/proof/archive.go +++ b/proof/archive.go @@ -107,6 +107,11 @@ type Archiver interface { // returned. FetchProof(ctx context.Context, id Locator) (Blob, error) + // HasProof returns true if the proof for the given locator exists. This + // is intended to be a performance optimized lookup compared to fetching + // a proof and checking for ErrProofNotFound. + HasProof(ctx context.Context, id Locator) (bool, error) + // FetchProofs fetches all proofs for assets uniquely identified by the // passed asset ID. FetchProofs(ctx context.Context, id asset.ID) ([]*AnnotatedProof, error) @@ -221,6 +226,22 @@ func (f *FileArchiver) FetchProof(_ context.Context, id Locator) (Blob, error) { return proofFile, nil } +// HasProof returns true if the proof for the given locator exists. This is +// intended to be a performance optimized lookup compared to fetching a proof +// and checking for ErrProofNotFound. +func (f *FileArchiver) HasProof(_ context.Context, id Locator) (bool, error) { + // All our on-disk storage is based on asset IDs, so to look up a path, + // we just need to compute the full file path and see if it exists on + // disk. + proofPath, err := genProofFilePath(f.proofPath, id) + if err != nil { + return false, fmt.Errorf("unable to make proof file path: %w", + err) + } + + return lnrpc.FileExists(proofPath), nil +} + // FetchProofs fetches all proofs for assets uniquely identified by the passed // asset ID. func (f *FileArchiver) FetchProofs(_ context.Context, @@ -412,6 +433,27 @@ func (m *MultiArchiver) FetchProof(ctx context.Context, return nil, ErrProofNotFound } +// HasProof returns true if the proof for the given locator exists. This is +// intended to be a performance optimized lookup compared to fetching a proof +// and checking for ErrProofNotFound. The multi archiver only considers a proof +// to be present if all backends have it. +func (m *MultiArchiver) HasProof(ctx context.Context, id Locator) (bool, error) { + for _, archive := range m.backends { + ok, err := archive.HasProof(ctx, id) + if err != nil { + return false, err + } + + // We are expecting all backends to have the proof, otherwise we + // consider the proof not to be found. + if !ok { + return false, nil + } + } + + return true, nil +} + // FetchProofs fetches all proofs for assets uniquely identified by the passed // asset ID. 
func (m *MultiArchiver) FetchProofs(ctx context.Context, diff --git a/tapdb/assets_store.go b/tapdb/assets_store.go index 2699445eb..606622601 100644 --- a/tapdb/assets_store.go +++ b/tapdb/assets_store.go @@ -175,6 +175,10 @@ type ActiveAssetsStore interface { FetchAssetProof(ctx context.Context, scriptKey []byte) (AssetProofI, error) + // HasAssetProof returns true if we have proof for a given asset + // identified by its script key. + HasAssetProof(ctx context.Context, scriptKey []byte) (bool, error) + // FetchAssetProofsByAssetID fetches all asset proofs for a given asset // ID. FetchAssetProofsByAssetID(ctx context.Context, @@ -1238,6 +1242,38 @@ func (a *AssetStore) FetchProof(ctx context.Context, return diskProof, nil } +// HasProof returns true if the proof for the given locator exists. This is +// intended to be a performance optimized lookup compared to fetching a proof +// and checking for ErrProofNotFound. +func (a *AssetStore) HasProof(ctx context.Context, locator proof.Locator) (bool, + error) { + + // We don't need anything else but the script key since we have an + // on-disk index for all proofs we store. + var ( + scriptKey = locator.ScriptKey + readOpts = NewAssetStoreReadTx() + haveProof bool + ) + dbErr := a.db.ExecTx(ctx, &readOpts, func(q ActiveAssetsStore) error { + proofAvailable, err := q.HasAssetProof( + ctx, scriptKey.SerializeCompressed(), + ) + if err != nil { + return fmt.Errorf("unable to find out if we have "+ + "asset proof: %w", err) + } + + haveProof = proofAvailable + return nil + }) + if dbErr != nil { + return false, dbErr + } + + return haveProof, nil +} + // FetchProofs fetches all proofs for assets uniquely identified by the passed // asset ID. // diff --git a/tapdb/sqlc/assets.sql.go b/tapdb/sqlc/assets.sql.go index 5a30c91ea..a89066174 100644 --- a/tapdb/sqlc/assets.sql.go +++ b/tapdb/sqlc/assets.sql.go @@ -1598,6 +1598,27 @@ func (q *Queries) GenesisPoints(ctx context.Context) ([]GenesisPoint, error) { return items, nil } +const hasAssetProof = `-- name: HasAssetProof :one +WITH asset_info AS ( + SELECT assets.asset_id + FROM assets + JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id + WHERE script_keys.tweaked_script_key = $1 +) +SELECT COUNT(asset_info.asset_id) > 0 as has_proof +FROM asset_proofs +JOIN asset_info + ON asset_info.asset_id = asset_proofs.asset_id +` + +func (q *Queries) HasAssetProof(ctx context.Context, tweakedScriptKey []byte) (bool, error) { + row := q.db.QueryRowContext(ctx, hasAssetProof, tweakedScriptKey) + var has_proof bool + err := row.Scan(&has_proof) + return has_proof, err +} + const insertAssetSeedling = `-- name: InsertAssetSeedling :exec INSERT INTO asset_seedlings ( asset_name, asset_type, asset_version, asset_supply, asset_meta_id, diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index 3a9a6b27a..71fad7803 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -79,6 +79,7 @@ type Querier interface { GenesisAssets(ctx context.Context) ([]GenesisAsset, error) GenesisPoints(ctx context.Context) ([]GenesisPoint, error) GetRootKey(ctx context.Context, id []byte) (Macaroon, error) + HasAssetProof(ctx context.Context, tweakedScriptKey []byte) (bool, error) InsertAddr(ctx context.Context, arg InsertAddrParams) (int64, error) InsertAssetSeedling(ctx context.Context, arg InsertAssetSeedlingParams) error InsertAssetSeedlingIntoBatch(ctx context.Context, arg InsertAssetSeedlingIntoBatchParams) error diff --git a/tapdb/sqlc/queries/assets.sql b/tapdb/sqlc/queries/assets.sql index 
cc33881e7..84a68ab5d 100644 --- a/tapdb/sqlc/queries/assets.sql +++ b/tapdb/sqlc/queries/assets.sql @@ -690,6 +690,19 @@ FROM asset_proofs JOIN asset_info ON asset_info.asset_id = asset_proofs.asset_id; +-- name: HasAssetProof :one +WITH asset_info AS ( + SELECT assets.asset_id + FROM assets + JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id + WHERE script_keys.tweaked_script_key = $1 +) +SELECT COUNT(asset_info.asset_id) > 0 as has_proof +FROM asset_proofs +JOIN asset_info + ON asset_info.asset_id = asset_proofs.asset_id; + -- name: InsertAssetWitness :exec INSERT INTO asset_witnesses ( asset_id, prev_out_point, prev_asset_id, prev_script_key, witness_stack, diff --git a/tapdb/sqlutils_test.go b/tapdb/sqlutils_test.go index 6f0786fb3..034cf4bcd 100644 --- a/tapdb/sqlutils_test.go +++ b/tapdb/sqlutils_test.go @@ -150,6 +150,15 @@ func (d *DbHandler) AddRandomAssetProof(t *testing.T) (*asset.Asset, }) require.NoError(t, err, "unable to insert chain tx: %w", err) + // Before we insert the proof, we expect our backend to report it as not + // found. + proofLocator := proof.Locator{ + ScriptKey: *testAsset.ScriptKey.PubKey, + } + found, err := assetStore.HasProof(ctx, proofLocator) + require.NoError(t, err) + require.False(t, found) + // With all our test data constructed, we'll now attempt to import the // asset into the database. require.NoError(t, assetStore.ImportProofs( @@ -157,6 +166,11 @@ func (d *DbHandler) AddRandomAssetProof(t *testing.T) (*asset.Asset, annotatedProof, )) + // Now the HasProof should return true. + found, err = assetStore.HasProof(ctx, proofLocator) + require.NoError(t, err) + require.True(t, found) + return testAsset, annotatedProof } diff --git a/tapgarden/mock.go b/tapgarden/mock.go index 160ef3b0e..887272e53 100644 --- a/tapgarden/mock.go +++ b/tapgarden/mock.go @@ -553,6 +553,12 @@ func (m *MockProofArchive) FetchProof(ctx context.Context, return nil, nil } +func (m *MockProofArchive) HasProof(ctx context.Context, + id proof.Locator) (bool, error) { + + return false, nil +} + func (m *MockProofArchive) FetchProofs(ctx context.Context, id asset.ID) ([]*proof.AnnotatedProof, error) { From 2cd604e23cf9dacacd4367cc512e548cb3241b3f Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:22:58 +0100 Subject: [PATCH 47/54] tapdb: turn MultiverseStore into NotifyArchiver With this commit we prepare the MultiverseStore to also act as a NotifyArchiver that can notify about new incoming proofs. --- tapdb/multiverse.go | 174 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 170 insertions(+), 4 deletions(-) diff --git a/tapdb/multiverse.go b/tapdb/multiverse.go index 12bbb81b8..edc62cb07 100644 --- a/tapdb/multiverse.go +++ b/tapdb/multiverse.go @@ -1,6 +1,7 @@ package tapdb import ( + "bytes" "context" "crypto/sha256" "database/sql" @@ -476,15 +477,23 @@ type MultiverseStore struct { proofCache *proofCache leafKeysCache *universeLeafCache + + // transferProofDistributor is an event distributor that will be used to + // notify subscribers about new proof leaves that are added to the + // multiverse. This is used to notify the custodian about new incoming + // proofs. And since the custodian is only interested in transfer + // proofs, we only signal on transfer proofs. + transferProofDistributor *fn.EventDistributor[proof.Blob] } // NewMultiverseStore creates a new multiverse DB store handle. 
func NewMultiverseStore(db BatchedMultiverse) *MultiverseStore { return &MultiverseStore{ - db: db, - rootNodeCache: newRootNodeCache(), - proofCache: newProofCache(), - leafKeysCache: newUniverseLeafCache(), + db: db, + rootNodeCache: newRootNodeCache(), + proofCache: newProofCache(), + leafKeysCache: newUniverseLeafCache(), + transferProofDistributor: fn.NewEventDistributor[proof.Blob](), } } @@ -850,6 +859,78 @@ func (b *MultiverseStore) FetchProofLeaf(ctx context.Context, return proofs, nil } +// FetchProof fetches a proof for an asset uniquely identified by the passed +// Locator. The returned blob contains the encoded full proof file, representing +// the complete provenance of the asset. +// +// If a proof cannot be found, then ErrProofNotFound is returned. +// +// NOTE: This is part of the proof.NotifyArchiver interface. +func (b *MultiverseStore) FetchProof(ctx context.Context, + originLocator proof.Locator) (proof.Blob, error) { + + // The universe only delivers a single proof at a time, so we need a + // callback that we can feed into proof.FetchProofProvenance to assemble + // the full proof file. + fetchProof := func(ctx context.Context, loc proof.Locator) (proof.Blob, + error) { + + uniID := universe.Identifier{ + AssetID: *loc.AssetID, + GroupKey: loc.GroupKey, + ProofType: universe.ProofTypeTransfer, + } + scriptKey := asset.NewScriptKey(&loc.ScriptKey) + leafKey := universe.LeafKey{ + ScriptKey: &scriptKey, + } + if loc.OutPoint != nil { + leafKey.OutPoint = *loc.OutPoint + } + + proofs, err := b.FetchProofLeaf(ctx, uniID, leafKey) + if errors.Is(err, universe.ErrNoUniverseProofFound) { + // If we didn't find a proof, maybe we arrived at the + // issuance proof, in which case we need to adjust the + // proof type. + uniID.ProofType = universe.ProofTypeIssuance + proofs, err = b.FetchProofLeaf(ctx, uniID, leafKey) + + // If we still didn't find a proof, then we'll return + // the proof not found error, but the one from the proof + // package, not the universe package, as the Godoc for + // this method in the proof.NotifyArchiver states. + if errors.Is(err, universe.ErrNoUniverseProofFound) { + return nil, proof.ErrProofNotFound + } + } + if err != nil { + return nil, fmt.Errorf("error fetching proof from "+ + "archive: %w", err) + } + + if len(proofs) > 1 { + return nil, fmt.Errorf("expected only one proof, "+ + "got %d", len(proofs)) + } + + return proofs[0].Leaf.RawProof, nil + } + + file, err := proof.FetchProofProvenance(ctx, originLocator, fetchProof) + if err != nil { + return nil, fmt.Errorf("error fetching proof from archive: %w", + err) + } + + var buf bytes.Buffer + if err := file.Encode(&buf); err != nil { + return nil, fmt.Errorf("error encoding proof file: %w", err) + } + + return buf.Bytes(), nil +} + // UpsertProofLeaf upserts a proof leaf within the multiverse tree and the // universe tree that corresponds to the given key. func (b *MultiverseStore) UpsertProofLeaf(ctx context.Context, @@ -944,6 +1025,14 @@ func (b *MultiverseStore) UpsertProofLeaf(ctx context.Context, b.proofCache.delProofsForAsset(id) b.leafKeysCache.wipeCache(idStr) + // Notify subscribers about the new proof leaf, now that we're sure we + // have written it to the database. But we only care about transfer + // proofs, as the events are received by the custodian to finalize + // inbound transfers. 
+ if id.ProofType == universe.ProofTypeTransfer { + b.transferProofDistributor.NotifySubscribers(leaf.RawProof) + } + return issuanceProof, nil } @@ -1024,6 +1113,18 @@ func (b *MultiverseStore) UpsertProofLeafBatch(ctx context.Context, b.rootNodeCache.wipeCache() + // Notify subscribers about the new proof leaves, now that we're sure we + // have written them to the database. But we only care about transfer + // proofs, as the events are received by the custodian to finalize + // inbound transfers. + for idx := range items { + if items[idx].ID.ProofType == universe.ProofTypeTransfer { + b.transferProofDistributor.NotifySubscribers( + items[idx].Leaf.RawProof, + ) + } + } + // Invalidate the root node cache for all the assets we just inserted. idsToDelete := fn.NewSet(fn.Map(items, func(item *universe.Item) treeID { return treeID(item.ID.String()) @@ -1074,3 +1175,68 @@ func (b *MultiverseStore) DeleteUniverse(ctx context.Context, return id.String(), dbErr } + +// RegisterSubscriber adds a new subscriber for receiving events. The +// deliverExisting boolean indicates whether already existing items should be +// sent to the NewItemCreated channel when the subscription is started. An +// optional deliverFrom can be specified to indicate from which timestamp/index/ +// marker onward existing items should be delivered on startup. If deliverFrom +// is nil/zero/empty then all existing items will be delivered. +func (b *MultiverseStore) RegisterSubscriber( + receiver *fn.EventReceiver[proof.Blob], deliverExisting bool, + deliverFrom []*proof.Locator) error { + + b.transferProofDistributor.RegisterSubscriber(receiver) + + // No delivery of existing items requested, we're done here. + if !deliverExisting { + return nil + } + + ctx := context.Background() + for _, loc := range deliverFrom { + if loc.AssetID == nil { + return fmt.Errorf("missing asset ID") + } + + id := universe.Identifier{ + AssetID: *loc.AssetID, + GroupKey: loc.GroupKey, + ProofType: universe.ProofTypeTransfer, + } + scriptKey := asset.NewScriptKey(&loc.ScriptKey) + key := universe.LeafKey{ + ScriptKey: &scriptKey, + } + + if loc.OutPoint != nil { + key.OutPoint = *loc.OutPoint + } + + leaves, err := b.FetchProofLeaf(ctx, id, key) + if err != nil { + return err + } + + // Deliver the found leaves to the new item queue of the + // subscriber. + for idx := range leaves { + rawProof := leaves[idx].Leaf.RawProof + receiver.NewItemCreated.ChanIn() <- rawProof + } + } + + return nil +} + +// RemoveSubscriber removes the given subscriber and also stops it from +// processing events. +func (b *MultiverseStore) RemoveSubscriber( + subscriber *fn.EventReceiver[proof.Blob]) error { + + return b.transferProofDistributor.RemoveSubscriber(subscriber) +} + +// A compile-time interface to ensure MultiverseStore meets the +// proof.NotifyArchiver interface. +var _ proof.NotifyArchiver = (*MultiverseStore)(nil) From c628f3b893f1a48d8bad6aca34561db930988dfc Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:22:59 +0100 Subject: [PATCH 48/54] mint: allow blob to represent both file and single proof Because the universe only deals with individual proof leaves, we want the custodian to be able to deal with both proof files and single proofs. To make it easy to distinguish and convert between the two, we add some helper methods to the proof.Blob type. 
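To illustrate the intended call pattern, here is a short sketch of a
hypothetical caller (importBlob is not part of this patch; only the
Blob helper methods added below are). A receiver can now normalize
whichever representation it was handed without inspecting the magic
bytes itself:

	package blobexample

	import (
		"fmt"

		"github.com/lightninglabs/taproot-assets/proof"
	)

	// importBlob treats a blob uniformly: AsFile promotes a single
	// transition proof to a one-entry file, while AsSingleProof
	// extracts the latest transition from a full file.
	func importBlob(blob proof.Blob) error {
		file, err := blob.AsFile()
		if err != nil {
			return fmt.Errorf("unable to parse blob as file: %w",
				err)
		}

		lastProof, err := blob.AsSingleProof()
		if err != nil {
			return fmt.Errorf("unable to extract last proof: %w",
				err)
		}

		fmt.Printf("parsed full file (empty=%v), last transfer "+
			"moved %d units\n", file.IsEmpty(),
			lastProof.Asset.Amount)

		return nil
	}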
--- proof/mint.go | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/proof/mint.go b/proof/mint.go index 8c73372e2..737ff5014 100644 --- a/proof/mint.go +++ b/proof/mint.go @@ -11,9 +11,86 @@ import ( "github.com/lightninglabs/taproot-assets/commitment" ) -// Blob represents a serialized proof file, including the checksum. +// Blob either represents a serialized proof file, including the checksum or a +// single serialized issuance/transition proof. Which one it is can be found out +// from the leading magic bytes (or the helper methods that inspect those). type Blob []byte +// IsFile returns true if the blob is a serialized proof file. +func (b Blob) IsFile() bool { + return IsProofFile(b) +} + +// IsSingleProof returns true if the blob is a serialized single proof. +func (b Blob) IsSingleProof() bool { + return IsSingleProof(b) +} + +// AsFile returns the blob as a parsed file. If the blob is a single proof, it +// will be parsed as a file with a single proof. +func (b Blob) AsFile() (*File, error) { + switch { + // We have a full file, we can just parse it and return it. + case b.IsFile(): + file := NewEmptyFile(V0) + if err := file.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding proof file: %w", + err) + } + + return file, nil + + // We have a single proof, so let's parse it and return it directly, + // assuming it is the most recent proof the caller is interested in. + case b.IsSingleProof(): + p := Proof{} + if err := p.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding single proof: "+ + "%w", err) + } + + file, err := NewFile(V0, p) + if err != nil { + return nil, err + } + + return file, nil + + default: + return nil, fmt.Errorf("unknown proof blob type") + } +} + +// AsSingleProof returns the blob as a parsed single proof. If the blob is a +// full proof file, the parsed last proof of that file will be returned. +func (b Blob) AsSingleProof() (*Proof, error) { + switch { + // We have a full file, we can just parse it and return it. + case b.IsFile(): + file := NewEmptyFile(V0) + if err := file.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding proof file: %w", + err) + } + + return file.LastProof() + + // We have a single proof, so let's parse it and return it directly, + // assuming it is the most recent proof the caller is interested in. + case b.IsSingleProof(): + p := Proof{} + if err := p.Decode(bytes.NewReader(b)); err != nil { + return nil, fmt.Errorf("error decoding single proof: "+ + "%w", err) + } + + return &p, nil + + default: + return nil, fmt.Errorf("unknown proof blob type") + } +} + // AssetBlobs is a data structure used to pass around the proof files for a // set of assets which may have been created in the same batched transaction. // This maps the script key of the asset to the serialized proof file blob. 
From d8b3111af11ba32b5f10a3106b0463e836c73c3b Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 13 Dec 2023 15:23:00 +0100
Subject: [PATCH 49/54] fn: add generic functions for maps

--- fn/func.go | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ fn/iter.go | 28 +++++++++++++++++++++------- 2 files changed, 71 insertions(+), 7 deletions(-)

diff --git a/fn/func.go b/fn/func.go
index 0514fd230..6880e65db 100644
--- a/fn/func.go
+++ b/fn/func.go
@@ -130,6 +130,18 @@ func All[T any](xs []T, pred func(T) bool) bool {
 return true
 }
 
+// AllMapItems returns true if the passed predicate returns true for all items
+// in the map.
+func AllMapItems[T any, K comparable](xs map[K]T, pred func(T) bool) bool {
+ for i := range xs {
+ if !pred(xs[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
 // Any returns true if the passed predicate returns true for any item in the
 // slice.
 func Any[T any](xs []T, pred func(T) bool) bool {
@@ -148,6 +160,30 @@ func None[T any](xs []T, pred func(T) bool) bool {
 return !Any(xs, pred)
 }
 
+// AnyMapItem returns true if the passed predicate returns true for any item in
+// the map.
+func AnyMapItem[T any, K comparable](xs map[K]T, pred func(T) bool) bool {
+ for i := range xs {
+ if pred(xs[i]) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// NotAny returns true if the passed predicate returns false for all items in
+// the slice.
+func NotAny[T any](xs []T, pred func(T) bool) bool {
+ return !Any(xs, pred)
+}
+
+// NotAnyMapItem returns true if the passed predicate returns false for all
+// items in the map.
+func NotAnyMapItem[T any, K comparable](xs map[K]T, pred func(T) bool) bool {
+ return !AnyMapItem(xs, pred)
+}
+
 // Count returns the number of items in the slice that match the predicate.
 func Count[T any](xs []T, pred func(T) bool) int {
 var count int
@@ -161,6 +197,20 @@ func Count[T any](xs []T, pred func(T) bool) int {
 return count
 }
 
+// CountMapItems returns the number of items in the map that match the
+// predicate.
+func CountMapItems[T any, K comparable](xs map[K]T, pred func(T) bool) int {
+ var count int
+
+ for i := range xs {
+ if pred(xs[i]) {
+ count++
+ }
+ }
+
+ return count
+}
+
 // First returns the first item in the slice that matches the predicate, or an
 // error if none matches.
 func First[T any](xs []*T, pred func(*T) bool) (*T, error) {
diff --git a/fn/iter.go b/fn/iter.go
index 9e7c8dc23..c82d41639 100644
--- a/fn/iter.go
+++ b/fn/iter.go
@@ -7,10 +7,8 @@ package fn
 // This function can be used instead of the normal range loop to ensure that a
 // loop scoping bug isn't introduced.
 func ForEachErr[T any](s []T, f func(T) error) error {
- for _, item := range s {
- item := item
-
- if err := f(item); err != nil {
+ for i := range s {
+ if err := f(s[i]); err != nil {
 return err
 }
 }
@@ -22,9 +20,17 @@ func ForEachErr[T any](s []T, f func(T) error) error {
 // This can be used to ensure that any normal for-loop don't run into bugs due
 // to loop variable scoping.
 func ForEach[T any](items []T, f func(T)) {
- for _, item := range items {
- item := item
- f(item)
+ for i := range items {
+ f(items[i])
+ }
+}
+
+// ForEachMapItem is a generic implementation of a for-each (map with side
+// effects). This can be used to ensure that a normal for-loop doesn't run into
+// bugs due to loop variable scoping.
+func ForEachMapItem[T any, K comparable](items map[K]T, f func(T)) {
+ for i := range items {
+ f(items[i])
+ }
 }
 
@@ -38,6 +44,14 @@ func Enumerate[T any](items []T, f func(int, T)) {
 }
 }
 
+// EnumerateMap is a generic enumeration function. The closure will be called
+// for each key and item in the passed-in map.
+func EnumerateMap[T any, K comparable](items map[K]T, f func(K, T)) {
+ for key := range items {
+ f(key, items[key])
+ }
+}
+
 // MakeSlice is a generic function shorthand for making a slice out of a set
 // of elements. This can be used to avoid having to specify the type of the
 // slice as well as the types of the elements.

From d8229a8b41e85b6afea0c3912828e066bc14bfb0 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 13 Dec 2023 15:23:01 +0100
Subject: [PATCH 50/54] tapgarden: switch custodian over to updated interface

Since we now might get either a full proof file or just a transition
proof, the custodian needs to be updated to be able to deal with both.

--- tapgarden/custodian.go | 136 +++++++++++++++++++++++++++++++++++------ 1 file changed, 118 insertions(+), 18 deletions(-)

diff --git a/tapgarden/custodian.go b/tapgarden/custodian.go
index 8d5b6b352..a30f7eb8c 100644
--- a/tapgarden/custodian.go
+++ b/tapgarden/custodian.go
@@ -1,7 +1,6 @@
 package tapgarden
 
 import (
- "bytes"
 "errors"
 "fmt"
 "strings"
@@ -626,6 +625,7 @@ func (c *Custodian) checkProofAvailable(event *address.Event) (bool, error) {
 AssetID: fn.Ptr(event.Addr.AssetID),
 GroupKey: event.Addr.GroupKey,
 ScriptKey: event.Addr.ScriptKey,
+ OutPoint: &event.Outpoint,
 })
 switch {
 case errors.Is(err, proof.ErrProofNotFound):
@@ -636,15 +636,23 @@
 err)
 }
 
- file := proof.NewEmptyFile(proof.V0)
- if err := file.Decode(bytes.NewReader(blob)); err != nil {
- return false, fmt.Errorf("error decoding proof file: %w", err)
+ // At this point, we expect the proof to be a full file, containing the
+ // whole provenance chain (as required by implementers of the
+ // proof.NotifyArchiver.FetchProof() method). So if we don't get a full
+ // file, we can't continue.
+ if !blob.IsFile() {
+ return false, fmt.Errorf("expected proof to be a full file, " +
+ "but got something else")
+ }
+
+ file, err := blob.AsFile()
+ if err != nil {
+ return false, fmt.Errorf("error extracting proof file: %w", err)
 }
 
 // Exit early on empty proof (shouldn't happen outside of test cases).
 if file.IsEmpty() {
- return false, fmt.Errorf("archive contained empty proof file: "+
- "%w", err)
+ return false, fmt.Errorf("archive contained empty proof file")
 }
 
 lastProof, err := file.LastProof()
@@ -665,9 +673,92 @@
 // and pending address event. If a proof successfully matches the desired state
 // of the address, that completes the inbound transfer of an asset.
 func (c *Custodian) mapProofToEvent(p proof.Blob) error {
- file := proof.NewEmptyFile(proof.V0)
- if err := file.Decode(bytes.NewReader(p)); err != nil {
- return fmt.Errorf("error decoding proof file: %w", err)
+ // We arrive here if we are notified about a new proof. The notification
+ // interface allows that proof to be a single transition proof. So if
+ // we don't have a full file yet, we need to fetch it now. The
+ // proof.NotifyArchiver.FetchProof() method will return the full file as
+ // per its Godoc.
+ var (
+ proofBlob = p
+ lastProof *proof.Proof
+ err error
+ )
+ if !p.IsFile() {
+ log.Debugf("Received single proof, inspecting if matches event")
+ lastProof, err = p.AsSingleProof()
+ if err != nil {
+ return fmt.Errorf("error decoding proof: %w", err)
+ }
+
+ // Before we go ahead and fetch the full file, let's make sure
+ // we are actually interested in this proof. We need to do this
+ // because we receive all transfer proofs inserted into the
+ // local universe here. So they could just be from a proof sync
+ // run and not actually be for an address we are interested in.
+ haveMatchingEvents := fn.AnyMapItem(
+ c.events, func(e *address.Event) bool {
+ return EventMatchesProof(e, lastProof)
+ },
+ )
+ if !haveMatchingEvents {
+ log.Debugf("Proof doesn't match any events, skipping.")
+ return nil
+ }
+
+ ctxt, cancel := c.WithCtxQuit()
+ defer cancel()
+
+ loc := proof.Locator{
+ AssetID: fn.Ptr(lastProof.Asset.ID()),
+ ScriptKey: *lastProof.Asset.ScriptKey.PubKey,
+ OutPoint: fn.Ptr(lastProof.OutPoint()),
+ }
+ if lastProof.Asset.GroupKey != nil {
+ loc.GroupKey = &lastProof.Asset.GroupKey.GroupPubKey
+ }
+
+ log.Debugf("Received single proof, fetching full file")
+ proofBlob, err = c.cfg.ProofNotifier.FetchProof(ctxt, loc)
+ if err != nil {
+ return fmt.Errorf("error fetching full proof file for "+
+ "event: %w", err)
+ }
+
+ // Do we already have this proof in our main archive? This
+ // should only be false if we got the notification from our
+ // local universe instead of the local proof archive (which the
+ // couriers use). This is mainly an optimization to make sure we
+ // don't unnecessarily overwrite the proofs in our main archive.
+ haveProof, err := c.cfg.ProofArchive.HasProof(ctxt, loc)
+ if err != nil {
+ return fmt.Errorf("error checking if proof is "+
+ "available: %w", err)
+ }
+
+ // We don't have the proof yet, or not in all backends, so we
+ // need to import it now.
+ if !haveProof {
+ headerVerifier := GenHeaderVerifier(
+ ctxt, c.cfg.ChainBridge,
+ )
+ err = c.cfg.ProofArchive.ImportProofs(
+ ctxt, headerVerifier, c.cfg.GroupVerifier,
+ false, &proof.AnnotatedProof{
+ Locator: loc,
+ Blob: proofBlob,
+ },
+ )
+ if err != nil {
+ return fmt.Errorf("error importing proof "+
+ "file into main archive: %w", err)
+ }
+ }
+ }
+
+ // Now we can be sure we have a file.
+ file, err := proofBlob.AsFile()
+ if err != nil {
+ return fmt.Errorf("error extracting proof file: %w", err)
 }
 
 // Exit early on empty proof (shouldn't happen outside of test cases).
@@ -678,19 +769,22 @@ func (c *Custodian) mapProofToEvent(p proof.Blob) error {
 
 // We got the proof from the multi archiver, which verifies it before
 // giving it to us. So we don't have to verify them again and can
- // directly look at the last state.
- lastProof, err := file.LastProof()
- if err != nil {
- return fmt.Errorf("error fetching last proof: %w", err)
+ // directly look at the last state. We can skip extracting the last
+ // proof if we started out with a single proof in the first place, which
+ // we already parsed above.
+ if lastProof == nil {
+ lastProof, err = file.LastProof()
+ if err != nil {
+ return fmt.Errorf("error fetching last proof: %w", err)
+ }
 }
 
- log.Infof("Received new proof file, version=%d, num_proofs=%d",
- file.Version, file.NumProofs())
+ log.Infof("Received new proof file for asset ID %s, version=%d, "+
+ "num_proofs=%d", lastProof.Asset.ID().String(), file.Version,
+ file.NumProofs())
 
 // Check if any of our in-flight events match the last proof's state.
 for _, event := range c.events {
- if AddrMatchesAsset(event.Addr, &lastProof.Asset) &&
- event.Outpoint == lastProof.OutPoint() {
-
+ if EventMatchesProof(event, lastProof) {
 // Importing a proof already creates the asset in the
 // database. Therefore, all we need to do is update the
 // state of the address event to mark it as completed
@@ -844,3 +938,9 @@ func AddrMatchesAsset(addr *address.AddrWithKeyInfo, a *asset.Asset) bool {
 return addr.AssetID == a.ID() && groupKeyEqual &&
 addr.ScriptKey.IsEqual(a.ScriptKey.PubKey)
 }
+
+// EventMatchesProof returns true if the given event matches the given proof.
+func EventMatchesProof(event *address.Event, p *proof.Proof) bool {
+ return AddrMatchesAsset(event.Addr, &p.Asset) &&
+ event.Outpoint == p.OutPoint()
+}

From 1de40c65c1d577a136e98f8862077eb7ff9c6b8e Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 13 Dec 2023 15:23:02 +0100
Subject: [PATCH 51/54] proof+tapcfg: add and use new MultiArchiveNotifier

This commit creates a new notifier that can relay registrations for new
proofs to multiple notifier/archiver backends. We then use that to pass
in both the local assets store as well as the multiverse store to the
custodian to detect incoming proofs.

--- proof/archive.go | 87 ++++++++++++++++++++++++++++++++++++++++++++ tapcfg/server.go | 4 ++- 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/proof/archive.go b/proof/archive.go
index ff02c5b24..9530a616f 100644
--- a/proof/archive.go
+++ b/proof/archive.go
@@ -140,6 +140,93 @@ type NotifyArchiver interface {
 fn.EventPublisher[Blob, []*Locator]
 }
 
+// MultiArchiveNotifier is a NotifyArchiver that wraps several other archives
+// and notifies subscribers about new proofs that are added to any of the
+// archives.
+type MultiArchiveNotifier struct {
+ archives []NotifyArchiver
+}
+
+// NewMultiArchiveNotifier creates a new MultiArchiveNotifier based on the set
+// of specified backends.
+func NewMultiArchiveNotifier(archives ...NotifyArchiver) *MultiArchiveNotifier {
+ return &MultiArchiveNotifier{
+ archives: archives,
+ }
+}
+
+// FetchProof fetches a proof for an asset uniquely identified by the passed
+// Locator. The returned proof can either be a full proof file or just a
+// single proof.
+//
+// If a proof cannot be found, then ErrProofNotFound should be returned.
+//
+// NOTE: This is part of the NotifyArchiver interface.
+func (m *MultiArchiveNotifier) FetchProof(ctx context.Context,
+ id Locator) (Blob, error) {
+
+ for idx := range m.archives {
+ a := m.archives[idx]
+
+ proofBlob, err := a.FetchProof(ctx, id)
+ if errors.Is(err, ErrProofNotFound) {
+ // Try the next archive.
+ continue
+ } else if err != nil {
+ return nil, fmt.Errorf("error fetching proof "+
+ "from archive: %w", err)
+ }
+
+ return proofBlob, nil
+ }
+
+ return nil, ErrProofNotFound
+}
+
+// RegisterSubscriber adds a new subscriber for receiving events. The
+// registration request is forwarded to all registered archives.
+func (m *MultiArchiveNotifier) RegisterSubscriber(
+ receiver *fn.EventReceiver[Blob], deliverExisting bool,
+ deliverFrom []*Locator) error {
+
+ for idx := range m.archives {
+ a := m.archives[idx]
+
+ err := a.RegisterSubscriber(
+ receiver, deliverExisting, deliverFrom,
+ )
+ if err != nil {
+ return fmt.Errorf("error registering subscriber: %w",
+ err)
+ }
+ }
+
+ return nil
+}
+
+// RemoveSubscriber removes the given subscriber and also stops it from
+// processing events. The removal request is forwarded to all registered
+// archives.
+func (m *MultiArchiveNotifier) RemoveSubscriber( + subscriber *fn.EventReceiver[Blob]) error { + + for idx := range m.archives { + a := m.archives[idx] + + err := a.RemoveSubscriber(subscriber) + if err != nil { + return fmt.Errorf("error removing subscriber: "+ + "%w", err) + } + } + + return nil +} + +// A compile-time interface to ensure MultiArchiveNotifier meets the +// NotifyArchiver interface. +var _ NotifyArchiver = (*MultiArchiveNotifier)(nil) + // FileArchiver implements proof Archiver backed by an on-disk file system. The // archiver takes a single root directory then creates the following overlap // mapping: diff --git a/tapcfg/server.go b/tapcfg/server.go index 1020d2e24..862de4653 100644 --- a/tapcfg/server.go +++ b/tapcfg/server.go @@ -327,6 +327,8 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, TransferLog: assetStore, }) + multiNotifier := proof.NewMultiArchiveNotifier(assetStore, multiverse) + return &tap.Config{ DebugLevel: cfg.DebugLevel, RuntimeID: runtimeID, @@ -361,7 +363,7 @@ func genServerConfig(cfg *Config, cfgLogger btclog.Logger, ), AddrBook: addrBook, ProofArchive: proofArchive, - ProofNotifier: assetStore, + ProofNotifier: multiNotifier, ErrChan: mainErrChan, ProofCourierDispatcher: proofCourierDispatcher, ProofRetrievalDelay: cfg.CustodianProofRetrievalDelay, ProofWatcher: reOrgWatcher, From 13edbe05d8fe976cb068d32be03206a44c9a94e3 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Wed, 13 Dec 2023 15:23:04 +0100 Subject: [PATCH 52/54] itest: add test for local universe proof delivery --- itest/addrs_test.go | 82 ++++++++++++++++++++++++++++++++++++++ itest/send_test.go | 61 ++++++++++++++++++++++++++++ itest/test_list_on_test.go | 4 ++ 3 files changed, 147 insertions(+) diff --git a/itest/addrs_test.go b/itest/addrs_test.go index 657cafc64..b03ddfbc9 100644 --- a/itest/addrs_test.go +++ b/itest/addrs_test.go @@ -8,6 +8,7 @@ import ( tap "github.com/lightninglabs/taproot-assets" "github.com/lightninglabs/taproot-assets/fn" "github.com/lightninglabs/taproot-assets/internal/test" + "github.com/lightninglabs/taproot-assets/proof" "github.com/lightninglabs/taproot-assets/tappsbt" "github.com/lightninglabs/taproot-assets/taprpc" wrpc "github.com/lightninglabs/taproot-assets/taprpc/assetwalletrpc" @@ -512,6 +513,8 @@ func runMultiSendTest(ctxt context.Context, t *harnessTest, alice, require.NoError(t.t, err) } +// sendProof manually exports a proof from the given source node and imports it +// using the development only ImportProof RPC on the destination node. func sendProof(t *harnessTest, src, dst *tapdHarness, scriptKey []byte, genInfo *taprpc.GenesisInfo) *tapdevrpc.ImportProofResponse { @@ -543,6 +546,85 @@ func sendProof(t *harnessTest, src, dst *tapdHarness, scriptKey []byte, return importResp } +// sendProofUniRPC manually exports a proof from the given source node and +// imports it using the universe related InsertProof RPC on the destination +// node. 
+func sendProofUniRPC(t *harnessTest, src, dst *tapdHarness, scriptKey []byte,
+ genInfo *taprpc.GenesisInfo) *unirpc.AssetProofResponse {
+
+ ctxb := context.Background()
+
+ var proofResp *taprpc.ProofFile
+ waitErr := wait.NoError(func() error {
+ resp, err := src.ExportProof(ctxb, &taprpc.ExportProofRequest{
+ AssetId: genInfo.AssetId,
+ ScriptKey: scriptKey,
+ })
+ if err != nil {
+ return err
+ }
+
+ proofResp = resp
+ return nil
+ }, defaultWaitTimeout)
+ require.NoError(t.t, waitErr)
+
+ t.Logf("Importing proof %x using InsertProof", proofResp.RawProofFile)
+
+ f := proof.File{}
+ err := f.Decode(bytes.NewReader(proofResp.RawProofFile))
+ require.NoError(t.t, err)
+
+ lastProof, err := f.LastProof()
+ require.NoError(t.t, err)
+
+ var lastProofBytes bytes.Buffer
+ err = lastProof.Encode(&lastProofBytes)
+ require.NoError(t.t, err)
+ asset := lastProof.Asset
+
+ proofType := universe.ProofTypeTransfer
+ if asset.IsGenesisAsset() {
+ proofType = universe.ProofTypeIssuance
+ }
+
+ uniID := universe.Identifier{
+ AssetID: asset.ID(),
+ ProofType: proofType,
+ }
+ if asset.GroupKey != nil {
+ uniID.GroupKey = &asset.GroupKey.GroupPubKey
+ }
+
+ rpcUniID, err := tap.MarshalUniID(uniID)
+ require.NoError(t.t, err)
+
+ outpoint := &unirpc.Outpoint{
+ HashStr: lastProof.AnchorTx.TxHash().String(),
+ Index: int32(lastProof.InclusionProof.OutputIndex),
+ }
+
+ importResp, err := dst.InsertProof(ctxb, &unirpc.AssetProof{
+ Key: &unirpc.UniverseKey{
+ Id: rpcUniID,
+ LeafKey: &unirpc.AssetKey{
+ Outpoint: &unirpc.AssetKey_Op{
+ Op: outpoint,
+ },
+ ScriptKey: &unirpc.AssetKey_ScriptKeyBytes{
+ ScriptKeyBytes: scriptKey,
+ },
+ },
+ },
+ AssetLeaf: &unirpc.AssetLeaf{
+ Proof: lastProofBytes.Bytes(),
+ },
+ })
+ require.NoError(t.t, err)
+
+ return importResp
+}
+
 // sendAssetsToAddr spends the given input asset and sends the amount specified
 // in the address to the Taproot output derived from the address.
 func sendAssetsToAddr(t *harnessTest, sender *tapdHarness,
diff --git a/itest/send_test.go b/itest/send_test.go
index ca709666f..5fdb8603c 100644
--- a/itest/send_test.go
+++ b/itest/send_test.go
@@ -1374,6 +1374,67 @@ func testSendMultipleCoins(t *harnessTest) {
 AssertNonInteractiveRecvComplete(t.t, secondTapd, 5)
 }
 
+// testSendNoCourierUniverseImport tests that we can send assets to a node that
+// has no courier, and then manually transfer the proof to the receiving node
+// using the universe proof import RPC method.
+func testSendNoCourierUniverseImport(t *harnessTest) {
+ ctxb := context.Background()
+
+ // First, we'll mint a normal asset with enough units.
+ rpcAssets := MintAssetsConfirmBatch(
+ t.t, t.lndHarness.Miner.Client, t.tapd,
+ []*mintrpc.MintAssetRequest{simpleAssets[0]},
+ )
+
+ firstAsset := rpcAssets[0]
+ genInfo := firstAsset.AssetGenesis
+
+ // Now that we have the asset created, we'll make a new node that'll
+ // serve as the node which'll receive the assets. We turn off the proof
+ // courier by supplying a dummy implementation.
+ secondTapd := setupTapdHarness(
+ t.t, t, t.lndHarness.Bob, t.universeServer,
+ func(params *tapdHarnessParams) {
+ params.proofCourier = &proof.MockProofCourier{}
+ },
+ )
+ defer func() {
+ require.NoError(t.t, secondTapd.stop(!*noDelete))
+ }()
+
+ // Next, we'll attempt to transfer some amount of assets[0] to the
+ // receiving node.
+ numUnitsSend := uint64(1200)
+
+ // Get a new address (which accepts the first asset) from the
+ // receiving node.
+ receiveAddr, err := secondTapd.NewAddr(ctxb, &taprpc.NewAddrRequest{
+ AssetId: genInfo.AssetId,
+ Amt: numUnitsSend,
+ })
+ require.NoError(t.t, err)
+ AssertAddrCreated(t.t, secondTapd, firstAsset, receiveAddr)
+
+ // Send the assets to the receiving node.
+ sendResp := sendAssetsToAddr(t, t.tapd, receiveAddr)
+
+ // Assert that the outbound transfer was confirmed.
+ expectedAmtAfterSend := firstAsset.Amount - numUnitsSend
+ ConfirmAndAssertOutboundTransfer(
+ t.t, t.lndHarness.Miner.Client, t.tapd, sendResp,
+ genInfo.AssetId,
+ []uint64{expectedAmtAfterSend, numUnitsSend}, 0, 1,
+ )
+
+ // Since we disabled proof couriers, we need to manually transfer the
+ // proof from the sender to the receiver now. We use the universe RPC
+ // InsertProof method to do this.
+ sendProofUniRPC(t, t.tapd, secondTapd, receiveAddr.ScriptKey, genInfo)
+
+ // And now, the transfer should be completed on the receiver side too.
+ AssertNonInteractiveRecvComplete(t.t, secondTapd, 1)
+}
+
 // addProofTestVectorFromFile adds a proof test vector by extracting it from the
 // proof file found at the given asset ID and script key.
 func addProofTestVectorFromFile(t *testing.T, testName string,
diff --git a/itest/test_list_on_test.go b/itest/test_list_on_test.go
index 9d4d6a582..c11c9d43d 100644
--- a/itest/test_list_on_test.go
+++ b/itest/test_list_on_test.go
@@ -76,6 +76,10 @@ var testCases = []*testCase{
 test: testOfflineReceiverEventuallyReceives,
 proofCourierType: proof.HashmailCourierType,
 },
+ {
+ name: "addr send no proof courier with local universe import",
+ test: testSendNoCourierUniverseImport,
+ },
 {
 name: "basic send passive asset",
 test: testBasicSendPassiveAsset,

From d34256d66e4502a251add76e28416a5d62ced829 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Wed, 20 Dec 2023 16:28:02 +0100
Subject: [PATCH 53/54] itest: test side loading of issuance proof

--- itest/test_list_on_test.go | 4 ++ itest/universe_test.go | 82 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+)

diff --git a/itest/test_list_on_test.go b/itest/test_list_on_test.go
index c11c9d43d..00fbe7914 100644
--- a/itest/test_list_on_test.go
+++ b/itest/test_list_on_test.go
@@ -179,6 +179,10 @@ var testCases = []*testCase{
 name: "universe sync",
 test: testUniverseSync,
 },
+ {
+ name: "universe sync manual insert",
+ test: testUniverseManualSync,
+ },
 {
 name: "universe federation",
 test: testUniverseFederation,
diff --git a/itest/universe_test.go b/itest/universe_test.go
index 5f9fe5b19..55795b201 100644
--- a/itest/universe_test.go
+++ b/itest/universe_test.go
@@ -263,6 +263,88 @@ func testUniverseSync(t *harnessTest) {
 )
 }
 
+// testUniverseManualSync tests that we're able to insert proofs manually into
+// a universe instead of using a full sync.
+func testUniverseManualSync(t *harnessTest) {
+ miner := t.lndHarness.Miner.Client
+
+ // First, we'll create our usual set of issuable assets.
+ rpcIssuableAssets := MintAssetsConfirmBatch(
+ t.t, miner, t.tapd, issuableAssets,
+ )
+
+ // With those assets created, we'll now create a new node that we'll
+ // use to exercise the manual Universe sync.
+ bob := setupTapdHarness(
+ t.t, t, t.lndHarness.Bob, t.universeServer,
+ func(params *tapdHarnessParams) {
+ params.noDefaultUniverseSync = true
+ },
+ )
+ defer func() {
+ require.NoError(t.t, bob.stop(!*noDelete))
+ }()
+
+ ctxb := context.Background()
+ ctxt, cancel := context.WithTimeout(ctxb, defaultWaitTimeout)
+ defer cancel()
+
+ // We now side-load the issuance proof of our first asset into Bob's
+ // universe.
+ firstAsset := rpcIssuableAssets[0] + firstAssetGen := firstAsset.AssetGenesis + sendProofUniRPC(t, t.tapd, bob, firstAsset.ScriptKey, firstAssetGen) + + // We should also be able to fetch an asset from Bob's Universe, and + // query for that asset with the compressed script key. + firstOutpoint, err := tap.UnmarshalOutpoint( + firstAsset.ChainAnchor.AnchorOutpoint, + ) + require.NoError(t.t, err) + + firstAssetProofQuery := unirpc.UniverseKey{ + Id: &unirpc.ID{ + Id: &unirpc.ID_GroupKey{ + GroupKey: firstAsset.AssetGroup.TweakedGroupKey, + }, + ProofType: unirpc.ProofType_PROOF_TYPE_ISSUANCE, + }, + LeafKey: &unirpc.AssetKey{ + Outpoint: &unirpc.AssetKey_Op{ + Op: &unirpc.Outpoint{ + HashStr: firstOutpoint.Hash.String(), + Index: int32(firstOutpoint.Index), + }, + }, + ScriptKey: &unirpc.AssetKey_ScriptKeyBytes{ + ScriptKeyBytes: firstAsset.ScriptKey, + }, + }, + } + + // We should now be able to query for the asset proof. + _, err = bob.QueryProof(ctxt, &firstAssetProofQuery) + require.NoError(t.t, err) + + // We should now also be able to fetch the meta data and group key for + // the asset. + metaData, err := bob.FetchAssetMeta(ctxt, &taprpc.FetchAssetMetaRequest{ + Asset: &taprpc.FetchAssetMetaRequest_MetaHash{ + MetaHash: firstAssetGen.MetaHash, + }, + }) + require.NoError(t.t, err) + require.Equal(t.t, firstAssetGen.MetaHash, metaData.MetaHash) + + // We should be able to create a new address for the asset, since that + // requires us to know the full genesis and group key. + _, err = bob.NewAddr(ctxt, &taprpc.NewAddrRequest{ + AssetId: firstAssetGen.AssetId, + Amt: 500, + }) + require.NoError(t.t, err) +} + // unmarshalMerkleSumNode un-marshals a protobuf MerkleSumNode. func unmarshalMerkleSumNode(root *unirpc.MerkleSumNode) mssmt.Node { var nodeHash mssmt.NodeHash From d36e8446b8568e9a131dbd8f0e09052d30fdc39e Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 26 Jan 2024 15:05:41 -0800 Subject: [PATCH 54/54] build: bump version to v0.3.3-alpha.rc1 --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 2bb292896..c3e6eb1d1 100644 --- a/version.go +++ b/version.go @@ -49,7 +49,7 @@ const ( // AppPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. - AppPreRelease = "alpha" + AppPreRelease = "alpha.rc1" // defaultAgentName is the default name of the software that is added as // the first part of the user agent string.
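For context on the tag above: version.go assembles the full semantic version
string from the numeric version components plus AppPreRelease, so the value set
in this patch yields "v0.3.3-alpha.rc1". A simplified sketch of that assembly
(the AppMajor/AppMinor/AppPatch constants are assumed from the surrounding
file; the real function may differ):

    func semanticVersion() string {
            // Start with the dotted numeric version, e.g. "0.3.3".
            version := fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch)

            // Append the pre-release label, if any, yielding
            // "0.3.3-alpha.rc1" with the value set in this patch.
            if AppPreRelease != "" {
                    version = fmt.Sprintf("%s-%s", version, AppPreRelease)
            }

            return version
    }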